/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
#include "affinity.h"
#include "debugfs.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
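
/*
 * All of the module parameters above can be overridden when the driver is
 * loaded.  For example (illustrative only -- suitable values depend on the
 * fabric configuration):
 *
 *	modprobe hfi1 num_vls=4 rcv_intr_timeout=1000
 *
 * or, equivalently, hfi1.num_vls=4 on the kernel command line.
 */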

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
	u16 unused0;
	u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
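
/*
 * A flag_table is scanned to turn a raw error/status CSR value into a
 * human-readable string.  A minimal sketch of such a decoder (illustrative
 * only; the driver's actual helper and its buffer handling may differ):
 *
 *	static void decode_flags(char *buf, size_t len, u64 reg,
 *				 const struct flag_table *table, int n)
 *	{
 *		int i;
 *
 *		buf[0] = '\0';
 *		for (i = 0; i < n; i++)
 *			if (reg & table[i].flag) {
 *				strlcat(buf, table[i].str, len);
 *				strlcat(buf, " ", len);
 *			}
 *	}
 */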

/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define DEFAULT_KRCVQS		2
#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1

/*
 * RSM instance allocation
 *   0 - Verbs
 *   1 - User Fecn Handling
 *   2 - Vnic
 */
#define RSM_INS_VERBS		0
#define RSM_INS_FECN		1
#define RSM_INS_VNIC		2

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT	39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE		2ull
#define QW_SHIFT		6ull
/* QPN[7..1] */
#define QPN_WIDTH		7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW		0ull
#define LRH_BTH_BIT_OFFSET	48ull
#define LRH_BTH_OFFSET(off)	((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET	LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK		3ull
#define LRH_BTH_VALUE		2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW		0ull
#define LRH_SC_BIT_OFFSET	56ull
#define LRH_SC_OFFSET(off)	((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET	LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK		128ull
#define LRH_SC_VALUE		0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET	((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET	((1ull << QW_SHIFT) | (1ull))

/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW		0ull
#define L2_TYPE_BIT_OFFSET	61ull
#define L2_TYPE_OFFSET(off)	((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET	L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK		3ull
#define L2_16B_VALUE		2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW		1ull
#define L4_TYPE_BIT_OFFSET	0ull
#define L4_TYPE_OFFSET(off)	((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET	L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK	0xFFull
#define L4_16B_ETH_VALUE	0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET	((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET		((1 << QW_SHIFT) | (32ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
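
/*
 * Example: SC2VL_VAL builds the 64-bit value for one SendSC2VLt CSR, pairing
 * each SC number with the VL it should map to.  An illustrative identity
 * mapping of SC0-SC7 onto VL0-VL7 for table 0 (not necessarily the power-on
 * values this driver programs, and assuming the SEND_SC2VLT0 register macro)
 * would be written as:
 *
 *	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
 *		0,
 *		0, 0, 1, 1,
 *		2, 2, 3, 3,
 *		4, 4, 5, 5,
 *		6, 6, 7, 7));
 */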

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);

/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};
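
/*
 * A second tier error interrupt is typically serviced by reading the status
 * CSR named in an err_reg_info entry, writing the same bits back to the
 * clear CSR, and then invoking the entry's handler.  A minimal sketch of
 * such a "clear down" step (illustrative only; the driver's real routine
 * also throttles and masks stuck interrupts):
 *
 *	static void clear_down(struct hfi1_devdata *dd, u32 source,
 *			       const struct err_reg_info *eri)
 *	{
 *		u64 reg = read_csr(dd, eri->status);
 *
 *		write_csr(dd, eri->clear, reg);
 *		if (eri->handler)
 *			eri->handler(dd, source, reg);
 *	}
 */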
1096
1097#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1098#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1099#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1100
1101/*
1102 * Helpers for building HFI and DC error interrupt table entries. Different
1103 * helpers are needed because of inconsistent register names.
1104 */
1105#define EE(reg, handler, desc) \
1106 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1107 handler, desc }
1108#define DC_EE1(reg, handler, desc) \
1109 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1110#define DC_EE2(reg, handler, desc) \
1111 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1112
1113/*
1114 * Table of the "misc" grouping of error interrupts. Each entry refers to
1115 * another register containing more information.
1116 */
1117static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1118/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1119/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1120/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1121/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1122/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1123/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1124/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1125/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1126 /* the rest are reserved */
1127};
1128
1129/*
1130 * Index into the Various section of the interrupt sources
1131 * corresponding to the Critical Temperature interrupt.
1132 */
1133#define TCRIT_INT_SOURCE 4
1134
1135/*
1136 * SDMA error interrupt entry - refers to another register containing more
1137 * information.
1138 */
1139static const struct err_reg_info sdma_eng_err =
1140 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1141
1142static const struct err_reg_info various_err[NUM_VARIOUS] = {
1143/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1144/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1145/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1146/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1147/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1148 /* rest are reserved */
1149};
1150
1151/*
1152 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1153 * register can not be derived from the MTU value because 10K is not
1154 * a power of 2. Therefore, we need a constant. Everything else can
1155 * be calculated.
1156 */
1157#define DCC_CFG_PORT_MTU_CAP_10240 7
1158
1159/*
1160 * Table of the DC grouping of error interrupts. Each entry refers to
1161 * another register containing more information.
1162 */
1163static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1164/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1165/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1166/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1167/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1168 /* the rest are reserved */
1169};
1170
1171struct cntr_entry {
1172 /*
1173 * counter name
1174 */
1175 char *name;
1176
1177 /*
1178 * csr to read for name (if applicable)
1179 */
1180 u64 csr;
1181
1182 /*
1183 * offset into dd or ppd to store the counter's value
1184 */
1185 int offset;
1186
1187 /*
1188 * flags
1189 */
1190 u8 flags;
1191
1192 /*
1193 * accessor for stat element, context either dd or ppd
1194 */
Jubin John17fb4f22016-02-14 20:21:52 -08001195 u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1196 int mode, u64 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001197};
1198
1199#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1200#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1201
1202#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1203{ \
1204 name, \
1205 csr, \
1206 offset, \
1207 flags, \
1208 accessor \
1209}
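/*
 * The initializer above is positional and matches the field order of
 * struct cntr_entry: name, csr, offset, flags, rw_cntr.  For example
 * (placeholder names, not counters defined here),
 * CNTR_ELEM("Foo", FOO_CSR, 0, CNTR_NORMAL, dev_access_u64_csr) builds an
 * entry whose accessor reads FOO_CSR through dev_access_u64_csr().
 */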
1210
1211/* 32bit RXE */
1212#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1213CNTR_ELEM(#name, \
1214 (counter * 8 + RCV_COUNTER_ARRAY32), \
1215 0, flags | CNTR_32BIT, \
1216 port_access_u32_csr)
1217
1218#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1219CNTR_ELEM(#name, \
1220 (counter * 8 + RCV_COUNTER_ARRAY32), \
1221 0, flags | CNTR_32BIT, \
1222 dev_access_u32_csr)
1223
1224/* 64bit RXE */
1225#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1226CNTR_ELEM(#name, \
1227 (counter * 8 + RCV_COUNTER_ARRAY64), \
1228 0, flags, \
1229 port_access_u64_csr)
1230
1231#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1232CNTR_ELEM(#name, \
1233 (counter * 8 + RCV_COUNTER_ARRAY64), \
1234 0, flags, \
1235 dev_access_u64_csr)
1236
1237#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1238#define OVR_ELM(ctx) \
1239CNTR_ELEM("RcvHdrOvr" #ctx, \
1240	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1241	  0, CNTR_NORMAL, port_access_u64_csr)
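/*
 * For example, OVR_ELM(3) produces the "RcvHdrOvr3" entry whose CSR is
 * RCV_HDR_OVFL_CNT + 3 * 0x100, i.e. the per-context header overflow
 * counters are laid out 0x100 bytes apart.
 */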
1242
1243/* 32bit TXE */
1244#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1245CNTR_ELEM(#name, \
1246 (counter * 8 + SEND_COUNTER_ARRAY32), \
1247 0, flags | CNTR_32BIT, \
1248 port_access_u32_csr)
1249
1250/* 64bit TXE */
1251#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1252CNTR_ELEM(#name, \
1253 (counter * 8 + SEND_COUNTER_ARRAY64), \
1254 0, flags, \
1255 port_access_u64_csr)
1256
1257# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1258CNTR_ELEM(#name,\
1259 counter * 8 + SEND_COUNTER_ARRAY64, \
1260 0, \
1261 flags, \
1262 dev_access_u64_csr)
1263
1264/* CCE */
1265#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1266CNTR_ELEM(#name, \
1267 (counter * 8 + CCE_COUNTER_ARRAY32), \
1268 0, flags | CNTR_32BIT, \
1269 dev_access_u32_csr)
1270
1271#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1272CNTR_ELEM(#name, \
1273 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1274 0, flags | CNTR_32BIT, \
1275 dev_access_u32_csr)
1276
1277/* DC */
1278#define DC_PERF_CNTR(name, counter, flags) \
1279CNTR_ELEM(#name, \
1280 counter, \
1281 0, \
1282 flags, \
1283 dev_access_u64_csr)
1284
1285#define DC_PERF_CNTR_LCB(name, counter, flags) \
1286CNTR_ELEM(#name, \
1287 counter, \
1288 0, \
1289 flags, \
1290 dc_access_lcb_cntr)
1291
1292/* ibp counters */
1293#define SW_IBP_CNTR(name, cntr) \
1294CNTR_ELEM(#name, \
1295 0, \
1296 0, \
1297 CNTR_SYNTH, \
1298 access_ibp_##cntr)
1299
1300u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1301{
1302	if (dd->flags & HFI1_PRESENT) {
1303		return readq((void __iomem *)dd->kregbase + offset);
1304	}
1305 return -1;
1306}
1307
1308void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1309{
1310 if (dd->flags & HFI1_PRESENT)
1311 writeq(value, (void __iomem *)dd->kregbase + offset);
1312}
1313
1314void __iomem *get_csr_addr(
1315 struct hfi1_devdata *dd,
1316 u32 offset)
1317{
1318 return (void __iomem *)dd->kregbase + offset;
1319}
1320
1321static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1322 int mode, u64 value)
1323{
1324 u64 ret;
1325
1326	if (mode == CNTR_MODE_R) {
1327 ret = read_csr(dd, csr);
1328 } else if (mode == CNTR_MODE_W) {
1329 write_csr(dd, csr, value);
1330 ret = value;
1331 } else {
1332 dd_dev_err(dd, "Invalid cntr register access mode");
1333 return 0;
1334 }
1335
1336 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1337 return ret;
1338}
1339
1340/* Dev Access */
1341static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1342			      void *context, int vl, int mode, u64 data)
1343{
1344	struct hfi1_devdata *dd = context;
1345	u64 csr = entry->csr;
1346
1347	if (entry->flags & CNTR_SDMA) {
1348 if (vl == CNTR_INVALID_VL)
1349 return 0;
1350 csr += 0x100 * vl;
1351 } else {
1352 if (vl != CNTR_INVALID_VL)
1353 return 0;
1354 }
1355 return read_write_csr(dd, csr, mode, data);
1356}
1357
1358static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1359 void *context, int idx, int mode, u64 data)
1360{
1361 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1362
1363 if (dd->per_sdma && idx < dd->num_sdma)
1364 return dd->per_sdma[idx].err_cnt;
1365 return 0;
1366}
1367
1368static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1369 void *context, int idx, int mode, u64 data)
1370{
1371 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1372
1373 if (dd->per_sdma && idx < dd->num_sdma)
1374 return dd->per_sdma[idx].sdma_int_cnt;
1375 return 0;
1376}
1377
1378static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1379 void *context, int idx, int mode, u64 data)
1380{
1381 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1382
1383 if (dd->per_sdma && idx < dd->num_sdma)
1384 return dd->per_sdma[idx].idle_int_cnt;
1385 return 0;
1386}
1387
1388static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1389 void *context, int idx, int mode,
1390 u64 data)
1391{
1392 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1393
1394 if (dd->per_sdma && idx < dd->num_sdma)
1395 return dd->per_sdma[idx].progress_int_cnt;
1396 return 0;
1397}
1398
1399static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1400			      int vl, int mode, u64 data)
1401{
1402	struct hfi1_devdata *dd = context;
1403
1404 u64 val = 0;
1405 u64 csr = entry->csr;
1406
1407 if (entry->flags & CNTR_VL) {
1408 if (vl == CNTR_INVALID_VL)
1409 return 0;
1410 csr += 8 * vl;
1411 } else {
1412 if (vl != CNTR_INVALID_VL)
1413 return 0;
1414 }
1415
1416 val = read_write_csr(dd, csr, mode, data);
1417 return val;
1418}
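/*
 * Note the different CSR strides used by the two helpers above: per-SDMA
 * counters (CNTR_SDMA in dev_access_u32_csr) step by 0x100 per engine,
 * while per-VL counters (CNTR_VL here) step by 8 bytes per VL, as does
 * port_access_u64_csr() below.
 */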
1419
1420static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1421			      int vl, int mode, u64 data)
1422{
1423	struct hfi1_devdata *dd = context;
1424	u32 csr = entry->csr;
1425 int ret = 0;
1426
1427 if (vl != CNTR_INVALID_VL)
1428 return 0;
1429 if (mode == CNTR_MODE_R)
1430 ret = read_lcb_csr(dd, csr, &data);
1431 else if (mode == CNTR_MODE_W)
1432 ret = write_lcb_csr(dd, csr, data);
1433
1434 if (ret) {
1435 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1436 return 0;
1437 }
1438
1439 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1440 return data;
1441}
1442
1443/* Port Access */
1444static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1445			       int vl, int mode, u64 data)
1446{
1447	struct hfi1_pportdata *ppd = context;
1448
1449 if (vl != CNTR_INVALID_VL)
1450 return 0;
1451 return read_write_csr(ppd->dd, entry->csr, mode, data);
1452}
1453
1454static u64 port_access_u64_csr(const struct cntr_entry *entry,
1455			       void *context, int vl, int mode, u64 data)
1456{
1457	struct hfi1_pportdata *ppd = context;
1458	u64 val;
1459 u64 csr = entry->csr;
1460
1461 if (entry->flags & CNTR_VL) {
1462 if (vl == CNTR_INVALID_VL)
1463 return 0;
1464 csr += 8 * vl;
1465 } else {
1466 if (vl != CNTR_INVALID_VL)
1467 return 0;
1468 }
1469 val = read_write_csr(ppd->dd, csr, mode, data);
1470 return val;
1471}
1472
1473/* Software defined */
1474static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1475 u64 data)
1476{
1477 u64 ret;
1478
1479 if (mode == CNTR_MODE_R) {
1480 ret = *cntr;
1481 } else if (mode == CNTR_MODE_W) {
1482 *cntr = data;
1483 ret = data;
1484 } else {
1485 dd_dev_err(dd, "Invalid cntr sw access mode");
1486 return 0;
1487 }
1488
1489 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1490
1491 return ret;
1492}
1493
1494static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1495				 int vl, int mode, u64 data)
1496{
1497	struct hfi1_pportdata *ppd = context;
1498
1499 if (vl != CNTR_INVALID_VL)
1500 return 0;
1501 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1502}
1503
1504static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1505				 int vl, int mode, u64 data)
1506{
1507	struct hfi1_pportdata *ppd = context;
1508
1509 if (vl != CNTR_INVALID_VL)
1510 return 0;
1511 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1512}
1513
1514static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1515 void *context, int vl, int mode,
1516 u64 data)
1517{
1518 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1519
1520 if (vl != CNTR_INVALID_VL)
1521 return 0;
1522 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1523}
1524
1525static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1526				    void *context, int vl, int mode, u64 data)
1527{
1528	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1529 u64 zero = 0;
1530 u64 *counter;
1531
1532	if (vl == CNTR_INVALID_VL)
1533 counter = &ppd->port_xmit_discards;
1534 else if (vl >= 0 && vl < C_VL_COUNT)
1535 counter = &ppd->port_xmit_discards_vl[vl];
1536 else
1537 counter = &zero;
1538
1539	return read_write_sw(ppd->dd, counter, mode, data);
1540}
1541
1542static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1543				       void *context, int vl, int mode,
1544 u64 data)
1545{
1546	struct hfi1_pportdata *ppd = context;
1547
1548 if (vl != CNTR_INVALID_VL)
1549 return 0;
1550
1551 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1552 mode, data);
1553}
1554
1555static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1556				      void *context, int vl, int mode, u64 data)
1557{
1558	struct hfi1_pportdata *ppd = context;
1559
1560 if (vl != CNTR_INVALID_VL)
1561 return 0;
1562
1563 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1564 mode, data);
1565}
1566
1567u64 get_all_cpu_total(u64 __percpu *cntr)
1568{
1569 int cpu;
1570 u64 counter = 0;
1571
1572 for_each_possible_cpu(cpu)
1573 counter += *per_cpu_ptr(cntr, cpu);
1574 return counter;
1575}
1576
1577static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1578 u64 __percpu *cntr,
1579 int vl, int mode, u64 data)
1580{
1581	u64 ret = 0;
1582
1583 if (vl != CNTR_INVALID_VL)
1584 return 0;
1585
1586 if (mode == CNTR_MODE_R) {
1587 ret = get_all_cpu_total(cntr) - *z_val;
1588 } else if (mode == CNTR_MODE_W) {
1589 /* A write can only zero the counter */
1590 if (data == 0)
1591 *z_val = get_all_cpu_total(cntr);
1592 else
1593 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1594 } else {
1595 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1596 return 0;
1597 }
1598
1599 return ret;
1600}
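/*
 * Usage sketch (illustrative names): writing 0 re-snapshots the zero
 * value, so a later read returns only the activity since then,
 *
 *	read_write_cpu(dd, &z, cntr, CNTR_INVALID_VL, CNTR_MODE_W, 0);
 *	...
 *	delta = read_write_cpu(dd, &z, cntr, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 */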
1601
1602static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1603 void *context, int vl, int mode, u64 data)
1604{
1605	struct hfi1_devdata *dd = context;
1606
1607 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1608 mode, data);
1609}
1610
1611static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1612				   void *context, int vl, int mode, u64 data)
1613{
1614	struct hfi1_devdata *dd = context;
1615
1616 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1617 mode, data);
1618}
1619
1620static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1621 void *context, int vl, int mode, u64 data)
1622{
1623	struct hfi1_devdata *dd = context;
1624
1625 return dd->verbs_dev.n_piowait;
1626}
1627
1628static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1629 void *context, int vl, int mode, u64 data)
1630{
1631 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1632
1633 return dd->verbs_dev.n_piodrain;
1634}
1635
1636static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1637 void *context, int vl, int mode, u64 data)
1638{
1639	struct hfi1_devdata *dd = context;
1640
1641 return dd->verbs_dev.n_txwait;
1642}
1643
1644static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1645 void *context, int vl, int mode, u64 data)
1646{
1647	struct hfi1_devdata *dd = context;
1648
1649 return dd->verbs_dev.n_kmem_wait;
1650}
1651
1652static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1653				    void *context, int vl, int mode, u64 data)
1654{
1655 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1656
1657	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1658 mode, data);
1659}
1660
1661/* Software counters for the error status bits within MISC_ERR_STATUS */
1662static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1663 void *context, int vl, int mode,
1664 u64 data)
1665{
1666 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1667
1668 return dd->misc_err_status_cnt[12];
1669}
1670
1671static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1672 void *context, int vl, int mode,
1673 u64 data)
1674{
1675 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1676
1677 return dd->misc_err_status_cnt[11];
1678}
1679
1680static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1681 void *context, int vl, int mode,
1682 u64 data)
1683{
1684 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1685
1686 return dd->misc_err_status_cnt[10];
1687}
1688
1689static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1690 void *context, int vl,
1691 int mode, u64 data)
1692{
1693 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1694
1695 return dd->misc_err_status_cnt[9];
1696}
1697
1698static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1699 void *context, int vl, int mode,
1700 u64 data)
1701{
1702 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1703
1704 return dd->misc_err_status_cnt[8];
1705}
1706
1707static u64 access_misc_efuse_read_bad_addr_err_cnt(
1708 const struct cntr_entry *entry,
1709 void *context, int vl, int mode, u64 data)
1710{
1711 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1712
1713 return dd->misc_err_status_cnt[7];
1714}
1715
1716static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1717 void *context, int vl,
1718 int mode, u64 data)
1719{
1720 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1721
1722 return dd->misc_err_status_cnt[6];
1723}
1724
1725static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1726 void *context, int vl, int mode,
1727 u64 data)
1728{
1729 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1730
1731 return dd->misc_err_status_cnt[5];
1732}
1733
1734static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1735 void *context, int vl, int mode,
1736 u64 data)
1737{
1738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1739
1740 return dd->misc_err_status_cnt[4];
1741}
1742
1743static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1744 void *context, int vl,
1745 int mode, u64 data)
1746{
1747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1748
1749 return dd->misc_err_status_cnt[3];
1750}
1751
1752static u64 access_misc_csr_write_bad_addr_err_cnt(
1753 const struct cntr_entry *entry,
1754 void *context, int vl, int mode, u64 data)
1755{
1756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1757
1758 return dd->misc_err_status_cnt[2];
1759}
1760
1761static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1762 void *context, int vl,
1763 int mode, u64 data)
1764{
1765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1766
1767 return dd->misc_err_status_cnt[1];
1768}
1769
1770static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1771 void *context, int vl, int mode,
1772 u64 data)
1773{
1774 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1775
1776 return dd->misc_err_status_cnt[0];
1777}
1778
1779/*
1780 * Software counter for the aggregate of
1781 * individual CceErrStatus counters
1782 */
1783static u64 access_sw_cce_err_status_aggregated_cnt(
1784 const struct cntr_entry *entry,
1785 void *context, int vl, int mode, u64 data)
1786{
1787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1788
1789 return dd->sw_cce_err_status_aggregate;
1790}
1791
1792/*
1793 * Software counters corresponding to each of the
1794 * error status bits within CceErrStatus
1795 */
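/*
 * Each accessor below returns the element of dd->cce_err_status_cnt[]
 * named for the corresponding CceErrStatus bit; the indices (40 down to 0)
 * presumably mirror the bit positions, with the counts maintained by the
 * CceErrStatus handling elsewhere in this file.
 */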
1796static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1797 void *context, int vl, int mode,
1798 u64 data)
1799{
1800 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1801
1802 return dd->cce_err_status_cnt[40];
1803}
1804
1805static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1806 void *context, int vl, int mode,
1807 u64 data)
1808{
1809 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1810
1811 return dd->cce_err_status_cnt[39];
1812}
1813
1814static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1815 void *context, int vl, int mode,
1816 u64 data)
1817{
1818 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1819
1820 return dd->cce_err_status_cnt[38];
1821}
1822
1823static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1824 void *context, int vl, int mode,
1825 u64 data)
1826{
1827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1828
1829 return dd->cce_err_status_cnt[37];
1830}
1831
1832static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1833 void *context, int vl, int mode,
1834 u64 data)
1835{
1836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1837
1838 return dd->cce_err_status_cnt[36];
1839}
1840
1841static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1842 const struct cntr_entry *entry,
1843 void *context, int vl, int mode, u64 data)
1844{
1845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1846
1847 return dd->cce_err_status_cnt[35];
1848}
1849
1850static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1851 const struct cntr_entry *entry,
1852 void *context, int vl, int mode, u64 data)
1853{
1854 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1855
1856 return dd->cce_err_status_cnt[34];
1857}
1858
1859static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1860 void *context, int vl,
1861 int mode, u64 data)
1862{
1863 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1864
1865 return dd->cce_err_status_cnt[33];
1866}
1867
1868static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1869 void *context, int vl, int mode,
1870 u64 data)
1871{
1872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1873
1874 return dd->cce_err_status_cnt[32];
1875}
1876
1877static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1878 void *context, int vl, int mode, u64 data)
1879{
1880 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1881
1882 return dd->cce_err_status_cnt[31];
1883}
1884
1885static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1886 void *context, int vl, int mode,
1887 u64 data)
1888{
1889 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1890
1891 return dd->cce_err_status_cnt[30];
1892}
1893
1894static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1895 void *context, int vl, int mode,
1896 u64 data)
1897{
1898 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1899
1900 return dd->cce_err_status_cnt[29];
1901}
1902
1903static u64 access_pcic_transmit_back_parity_err_cnt(
1904 const struct cntr_entry *entry,
1905 void *context, int vl, int mode, u64 data)
1906{
1907 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1908
1909 return dd->cce_err_status_cnt[28];
1910}
1911
1912static u64 access_pcic_transmit_front_parity_err_cnt(
1913 const struct cntr_entry *entry,
1914 void *context, int vl, int mode, u64 data)
1915{
1916 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1917
1918 return dd->cce_err_status_cnt[27];
1919}
1920
1921static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1922 void *context, int vl, int mode,
1923 u64 data)
1924{
1925 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1926
1927 return dd->cce_err_status_cnt[26];
1928}
1929
1930static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1931 void *context, int vl, int mode,
1932 u64 data)
1933{
1934 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1935
1936 return dd->cce_err_status_cnt[25];
1937}
1938
1939static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1940 void *context, int vl, int mode,
1941 u64 data)
1942{
1943 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1944
1945 return dd->cce_err_status_cnt[24];
1946}
1947
1948static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1949 void *context, int vl, int mode,
1950 u64 data)
1951{
1952 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1953
1954 return dd->cce_err_status_cnt[23];
1955}
1956
1957static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1958 void *context, int vl,
1959 int mode, u64 data)
1960{
1961 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1962
1963 return dd->cce_err_status_cnt[22];
1964}
1965
1966static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1967 void *context, int vl, int mode,
1968 u64 data)
1969{
1970 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1971
1972 return dd->cce_err_status_cnt[21];
1973}
1974
1975static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1976 const struct cntr_entry *entry,
1977 void *context, int vl, int mode, u64 data)
1978{
1979 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1980
1981 return dd->cce_err_status_cnt[20];
1982}
1983
1984static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1985 void *context, int vl,
1986 int mode, u64 data)
1987{
1988 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1989
1990 return dd->cce_err_status_cnt[19];
1991}
1992
1993static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1994 void *context, int vl, int mode,
1995 u64 data)
1996{
1997 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1998
1999 return dd->cce_err_status_cnt[18];
2000}
2001
2002static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2003 void *context, int vl, int mode,
2004 u64 data)
2005{
2006 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2007
2008 return dd->cce_err_status_cnt[17];
2009}
2010
2011static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2012 void *context, int vl, int mode,
2013 u64 data)
2014{
2015 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2016
2017 return dd->cce_err_status_cnt[16];
2018}
2019
2020static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2021 void *context, int vl, int mode,
2022 u64 data)
2023{
2024 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2025
2026 return dd->cce_err_status_cnt[15];
2027}
2028
2029static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2030 void *context, int vl,
2031 int mode, u64 data)
2032{
2033 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2034
2035 return dd->cce_err_status_cnt[14];
2036}
2037
2038static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2039 void *context, int vl, int mode,
2040 u64 data)
2041{
2042 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2043
2044 return dd->cce_err_status_cnt[13];
2045}
2046
2047static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2048 const struct cntr_entry *entry,
2049 void *context, int vl, int mode, u64 data)
2050{
2051 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2052
2053 return dd->cce_err_status_cnt[12];
2054}
2055
2056static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2057 const struct cntr_entry *entry,
2058 void *context, int vl, int mode, u64 data)
2059{
2060 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2061
2062 return dd->cce_err_status_cnt[11];
2063}
2064
2065static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2066 const struct cntr_entry *entry,
2067 void *context, int vl, int mode, u64 data)
2068{
2069 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2070
2071 return dd->cce_err_status_cnt[10];
2072}
2073
2074static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2075 const struct cntr_entry *entry,
2076 void *context, int vl, int mode, u64 data)
2077{
2078 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2079
2080 return dd->cce_err_status_cnt[9];
2081}
2082
2083static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2084 const struct cntr_entry *entry,
2085 void *context, int vl, int mode, u64 data)
2086{
2087 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2088
2089 return dd->cce_err_status_cnt[8];
2090}
2091
2092static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2093 void *context, int vl,
2094 int mode, u64 data)
2095{
2096 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2097
2098 return dd->cce_err_status_cnt[7];
2099}
2100
2101static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2102 const struct cntr_entry *entry,
2103 void *context, int vl, int mode, u64 data)
2104{
2105 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2106
2107 return dd->cce_err_status_cnt[6];
2108}
2109
2110static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2111 void *context, int vl, int mode,
2112 u64 data)
2113{
2114 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2115
2116 return dd->cce_err_status_cnt[5];
2117}
2118
2119static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2120 void *context, int vl, int mode,
2121 u64 data)
2122{
2123 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2124
2125 return dd->cce_err_status_cnt[4];
2126}
2127
2128static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2129 const struct cntr_entry *entry,
2130 void *context, int vl, int mode, u64 data)
2131{
2132 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2133
2134 return dd->cce_err_status_cnt[3];
2135}
2136
2137static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2138 void *context, int vl,
2139 int mode, u64 data)
2140{
2141 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2142
2143 return dd->cce_err_status_cnt[2];
2144}
2145
2146static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2147 void *context, int vl,
2148 int mode, u64 data)
2149{
2150 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2151
2152 return dd->cce_err_status_cnt[1];
2153}
2154
2155static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2156 void *context, int vl, int mode,
2157 u64 data)
2158{
2159 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2160
2161 return dd->cce_err_status_cnt[0];
2162}
2163
2164/*
2165 * Software counters corresponding to each of the
2166 * error status bits within RcvErrStatus
2167 */
2168static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2169 void *context, int vl, int mode,
2170 u64 data)
2171{
2172 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2173
2174 return dd->rcv_err_status_cnt[63];
2175}
2176
2177static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2178 void *context, int vl,
2179 int mode, u64 data)
2180{
2181 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2182
2183 return dd->rcv_err_status_cnt[62];
2184}
2185
2186static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2187 void *context, int vl, int mode,
2188 u64 data)
2189{
2190 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2191
2192 return dd->rcv_err_status_cnt[61];
2193}
2194
2195static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2196 void *context, int vl, int mode,
2197 u64 data)
2198{
2199 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2200
2201 return dd->rcv_err_status_cnt[60];
2202}
2203
2204static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2205 void *context, int vl,
2206 int mode, u64 data)
2207{
2208 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2209
2210 return dd->rcv_err_status_cnt[59];
2211}
2212
2213static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2214 void *context, int vl,
2215 int mode, u64 data)
2216{
2217 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2218
2219 return dd->rcv_err_status_cnt[58];
2220}
2221
2222static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2223 void *context, int vl, int mode,
2224 u64 data)
2225{
2226 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2227
2228 return dd->rcv_err_status_cnt[57];
2229}
2230
2231static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2232 void *context, int vl, int mode,
2233 u64 data)
2234{
2235 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2236
2237 return dd->rcv_err_status_cnt[56];
2238}
2239
2240static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2241 void *context, int vl, int mode,
2242 u64 data)
2243{
2244 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2245
2246 return dd->rcv_err_status_cnt[55];
2247}
2248
2249static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2250 const struct cntr_entry *entry,
2251 void *context, int vl, int mode, u64 data)
2252{
2253 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2254
2255 return dd->rcv_err_status_cnt[54];
2256}
2257
2258static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2259 const struct cntr_entry *entry,
2260 void *context, int vl, int mode, u64 data)
2261{
2262 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2263
2264 return dd->rcv_err_status_cnt[53];
2265}
2266
2267static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2268 void *context, int vl,
2269 int mode, u64 data)
2270{
2271 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2272
2273 return dd->rcv_err_status_cnt[52];
2274}
2275
2276static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2277 void *context, int vl,
2278 int mode, u64 data)
2279{
2280 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2281
2282 return dd->rcv_err_status_cnt[51];
2283}
2284
2285static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2286 void *context, int vl,
2287 int mode, u64 data)
2288{
2289 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2290
2291 return dd->rcv_err_status_cnt[50];
2292}
2293
2294static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2295 void *context, int vl,
2296 int mode, u64 data)
2297{
2298 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2299
2300 return dd->rcv_err_status_cnt[49];
2301}
2302
2303static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2304 void *context, int vl,
2305 int mode, u64 data)
2306{
2307 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2308
2309 return dd->rcv_err_status_cnt[48];
2310}
2311
2312static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2313 void *context, int vl,
2314 int mode, u64 data)
2315{
2316 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2317
2318 return dd->rcv_err_status_cnt[47];
2319}
2320
2321static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2322 void *context, int vl, int mode,
2323 u64 data)
2324{
2325 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2326
2327 return dd->rcv_err_status_cnt[46];
2328}
2329
2330static u64 access_rx_hq_intr_csr_parity_err_cnt(
2331 const struct cntr_entry *entry,
2332 void *context, int vl, int mode, u64 data)
2333{
2334 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2335
2336 return dd->rcv_err_status_cnt[45];
2337}
2338
2339static u64 access_rx_lookup_csr_parity_err_cnt(
2340 const struct cntr_entry *entry,
2341 void *context, int vl, int mode, u64 data)
2342{
2343 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2344
2345 return dd->rcv_err_status_cnt[44];
2346}
2347
2348static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2349 const struct cntr_entry *entry,
2350 void *context, int vl, int mode, u64 data)
2351{
2352 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2353
2354 return dd->rcv_err_status_cnt[43];
2355}
2356
2357static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2358 const struct cntr_entry *entry,
2359 void *context, int vl, int mode, u64 data)
2360{
2361 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2362
2363 return dd->rcv_err_status_cnt[42];
2364}
2365
2366static u64 access_rx_lookup_des_part2_parity_err_cnt(
2367 const struct cntr_entry *entry,
2368 void *context, int vl, int mode, u64 data)
2369{
2370 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2371
2372 return dd->rcv_err_status_cnt[41];
2373}
2374
2375static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2376 const struct cntr_entry *entry,
2377 void *context, int vl, int mode, u64 data)
2378{
2379 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2380
2381 return dd->rcv_err_status_cnt[40];
2382}
2383
2384static u64 access_rx_lookup_des_part1_unc_err_cnt(
2385 const struct cntr_entry *entry,
2386 void *context, int vl, int mode, u64 data)
2387{
2388 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2389
2390 return dd->rcv_err_status_cnt[39];
2391}
2392
2393static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2394 const struct cntr_entry *entry,
2395 void *context, int vl, int mode, u64 data)
2396{
2397 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2398
2399 return dd->rcv_err_status_cnt[38];
2400}
2401
2402static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2403 const struct cntr_entry *entry,
2404 void *context, int vl, int mode, u64 data)
2405{
2406 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2407
2408 return dd->rcv_err_status_cnt[37];
2409}
2410
2411static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2412 const struct cntr_entry *entry,
2413 void *context, int vl, int mode, u64 data)
2414{
2415 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2416
2417 return dd->rcv_err_status_cnt[36];
2418}
2419
2420static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2421 const struct cntr_entry *entry,
2422 void *context, int vl, int mode, u64 data)
2423{
2424 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2425
2426 return dd->rcv_err_status_cnt[35];
2427}
2428
2429static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2430 const struct cntr_entry *entry,
2431 void *context, int vl, int mode, u64 data)
2432{
2433 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2434
2435 return dd->rcv_err_status_cnt[34];
2436}
2437
2438static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2439 const struct cntr_entry *entry,
2440 void *context, int vl, int mode, u64 data)
2441{
2442 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2443
2444 return dd->rcv_err_status_cnt[33];
2445}
2446
2447static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2448 void *context, int vl, int mode,
2449 u64 data)
2450{
2451 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2452
2453 return dd->rcv_err_status_cnt[32];
2454}
2455
2456static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2457 void *context, int vl, int mode,
2458 u64 data)
2459{
2460 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2461
2462 return dd->rcv_err_status_cnt[31];
2463}
2464
2465static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2466 void *context, int vl, int mode,
2467 u64 data)
2468{
2469 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2470
2471 return dd->rcv_err_status_cnt[30];
2472}
2473
2474static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2475 void *context, int vl, int mode,
2476 u64 data)
2477{
2478 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2479
2480 return dd->rcv_err_status_cnt[29];
2481}
2482
2483static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2484 void *context, int vl,
2485 int mode, u64 data)
2486{
2487 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2488
2489 return dd->rcv_err_status_cnt[28];
2490}
2491
2492static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2493 const struct cntr_entry *entry,
2494 void *context, int vl, int mode, u64 data)
2495{
2496 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2497
2498 return dd->rcv_err_status_cnt[27];
2499}
2500
2501static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2502 const struct cntr_entry *entry,
2503 void *context, int vl, int mode, u64 data)
2504{
2505 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2506
2507 return dd->rcv_err_status_cnt[26];
2508}
2509
2510static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2511 const struct cntr_entry *entry,
2512 void *context, int vl, int mode, u64 data)
2513{
2514 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2515
2516 return dd->rcv_err_status_cnt[25];
2517}
2518
2519static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2520 const struct cntr_entry *entry,
2521 void *context, int vl, int mode, u64 data)
2522{
2523 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2524
2525 return dd->rcv_err_status_cnt[24];
2526}
2527
2528static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2529 const struct cntr_entry *entry,
2530 void *context, int vl, int mode, u64 data)
2531{
2532 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2533
2534 return dd->rcv_err_status_cnt[23];
2535}
2536
2537static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2538 const struct cntr_entry *entry,
2539 void *context, int vl, int mode, u64 data)
2540{
2541 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2542
2543 return dd->rcv_err_status_cnt[22];
2544}
2545
2546static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2547 const struct cntr_entry *entry,
2548 void *context, int vl, int mode, u64 data)
2549{
2550 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2551
2552 return dd->rcv_err_status_cnt[21];
2553}
2554
2555static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2556 const struct cntr_entry *entry,
2557 void *context, int vl, int mode, u64 data)
2558{
2559 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2560
2561 return dd->rcv_err_status_cnt[20];
2562}
2563
2564static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2565 const struct cntr_entry *entry,
2566 void *context, int vl, int mode, u64 data)
2567{
2568 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2569
2570 return dd->rcv_err_status_cnt[19];
2571}
2572
2573static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2574 void *context, int vl,
2575 int mode, u64 data)
2576{
2577 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2578
2579 return dd->rcv_err_status_cnt[18];
2580}
2581
2582static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2583 void *context, int vl,
2584 int mode, u64 data)
2585{
2586 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2587
2588 return dd->rcv_err_status_cnt[17];
2589}
2590
2591static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2592 const struct cntr_entry *entry,
2593 void *context, int vl, int mode, u64 data)
2594{
2595 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2596
2597 return dd->rcv_err_status_cnt[16];
2598}
2599
2600static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2601 const struct cntr_entry *entry,
2602 void *context, int vl, int mode, u64 data)
2603{
2604 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2605
2606 return dd->rcv_err_status_cnt[15];
2607}
2608
2609static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2610 void *context, int vl,
2611 int mode, u64 data)
2612{
2613 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2614
2615 return dd->rcv_err_status_cnt[14];
2616}
2617
2618static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2619 void *context, int vl,
2620 int mode, u64 data)
2621{
2622 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2623
2624 return dd->rcv_err_status_cnt[13];
2625}
2626
2627static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2628 void *context, int vl, int mode,
2629 u64 data)
2630{
2631 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2632
2633 return dd->rcv_err_status_cnt[12];
2634}
2635
2636static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2637 void *context, int vl, int mode,
2638 u64 data)
2639{
2640 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2641
2642 return dd->rcv_err_status_cnt[11];
2643}
2644
2645static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2646 void *context, int vl, int mode,
2647 u64 data)
2648{
2649 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2650
2651 return dd->rcv_err_status_cnt[10];
2652}
2653
2654static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2655 void *context, int vl, int mode,
2656 u64 data)
2657{
2658 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2659
2660 return dd->rcv_err_status_cnt[9];
2661}
2662
2663static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2664 void *context, int vl, int mode,
2665 u64 data)
2666{
2667 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2668
2669 return dd->rcv_err_status_cnt[8];
2670}
2671
2672static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2673 const struct cntr_entry *entry,
2674 void *context, int vl, int mode, u64 data)
2675{
2676 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2677
2678 return dd->rcv_err_status_cnt[7];
2679}
2680
2681static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2682 const struct cntr_entry *entry,
2683 void *context, int vl, int mode, u64 data)
2684{
2685 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2686
2687 return dd->rcv_err_status_cnt[6];
2688}
2689
2690static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2691 void *context, int vl, int mode,
2692 u64 data)
2693{
2694 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2695
2696 return dd->rcv_err_status_cnt[5];
2697}
2698
2699static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2700 void *context, int vl, int mode,
2701 u64 data)
2702{
2703 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2704
2705 return dd->rcv_err_status_cnt[4];
2706}
2707
2708static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2709 void *context, int vl, int mode,
2710 u64 data)
2711{
2712 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2713
2714 return dd->rcv_err_status_cnt[3];
2715}
2716
2717static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2718 void *context, int vl, int mode,
2719 u64 data)
2720{
2721 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2722
2723 return dd->rcv_err_status_cnt[2];
2724}
2725
2726static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2727 void *context, int vl, int mode,
2728 u64 data)
2729{
2730 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2731
2732 return dd->rcv_err_status_cnt[1];
2733}
2734
2735static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2736 void *context, int vl, int mode,
2737 u64 data)
2738{
2739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2740
2741 return dd->rcv_err_status_cnt[0];
2742}
2743
2744/*
2745 * Software counters corresponding to each of the
2746 * error status bits within SendPioErrStatus
2747 */
2748static u64 access_pio_pec_sop_head_parity_err_cnt(
2749 const struct cntr_entry *entry,
2750 void *context, int vl, int mode, u64 data)
2751{
2752 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2753
2754 return dd->send_pio_err_status_cnt[35];
2755}
2756
2757static u64 access_pio_pcc_sop_head_parity_err_cnt(
2758 const struct cntr_entry *entry,
2759 void *context, int vl, int mode, u64 data)
2760{
2761 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2762
2763 return dd->send_pio_err_status_cnt[34];
2764}
2765
2766static u64 access_pio_last_returned_cnt_parity_err_cnt(
2767 const struct cntr_entry *entry,
2768 void *context, int vl, int mode, u64 data)
2769{
2770 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2771
2772 return dd->send_pio_err_status_cnt[33];
2773}
2774
2775static u64 access_pio_current_free_cnt_parity_err_cnt(
2776 const struct cntr_entry *entry,
2777 void *context, int vl, int mode, u64 data)
2778{
2779 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2780
2781 return dd->send_pio_err_status_cnt[32];
2782}
2783
2784static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2785 void *context, int vl, int mode,
2786 u64 data)
2787{
2788 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2789
2790 return dd->send_pio_err_status_cnt[31];
2791}
2792
2793static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2794 void *context, int vl, int mode,
2795 u64 data)
2796{
2797 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2798
2799 return dd->send_pio_err_status_cnt[30];
2800}
2801
2802static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2803 void *context, int vl, int mode,
2804 u64 data)
2805{
2806 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2807
2808 return dd->send_pio_err_status_cnt[29];
2809}
2810
2811static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2812 const struct cntr_entry *entry,
2813 void *context, int vl, int mode, u64 data)
2814{
2815 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2816
2817 return dd->send_pio_err_status_cnt[28];
2818}
2819
2820static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2821 void *context, int vl, int mode,
2822 u64 data)
2823{
2824 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2825
2826 return dd->send_pio_err_status_cnt[27];
2827}
2828
2829static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2830 void *context, int vl, int mode,
2831 u64 data)
2832{
2833 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2834
2835 return dd->send_pio_err_status_cnt[26];
2836}
2837
2838static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2839 void *context, int vl,
2840 int mode, u64 data)
2841{
2842 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2843
2844 return dd->send_pio_err_status_cnt[25];
2845}
2846
2847static u64 access_pio_block_qw_count_parity_err_cnt(
2848 const struct cntr_entry *entry,
2849 void *context, int vl, int mode, u64 data)
2850{
2851 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2852
2853 return dd->send_pio_err_status_cnt[24];
2854}
2855
2856static u64 access_pio_write_qw_valid_parity_err_cnt(
2857 const struct cntr_entry *entry,
2858 void *context, int vl, int mode, u64 data)
2859{
2860 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2861
2862 return dd->send_pio_err_status_cnt[23];
2863}
2864
2865static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2866 void *context, int vl, int mode,
2867 u64 data)
2868{
2869 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2870
2871 return dd->send_pio_err_status_cnt[22];
2872}
2873
2874static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2875 void *context, int vl,
2876 int mode, u64 data)
2877{
2878 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2879
2880 return dd->send_pio_err_status_cnt[21];
2881}
2882
2883static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2884 void *context, int vl,
2885 int mode, u64 data)
2886{
2887 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2888
2889 return dd->send_pio_err_status_cnt[20];
2890}
2891
2892static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2893 void *context, int vl,
2894 int mode, u64 data)
2895{
2896 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2897
2898 return dd->send_pio_err_status_cnt[19];
2899}
2900
2901static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2902 const struct cntr_entry *entry,
2903 void *context, int vl, int mode, u64 data)
2904{
2905 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2906
2907 return dd->send_pio_err_status_cnt[18];
2908}
2909
2910static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2911 void *context, int vl, int mode,
2912 u64 data)
2913{
2914 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2915
2916 return dd->send_pio_err_status_cnt[17];
2917}
2918
2919static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2920 void *context, int vl, int mode,
2921 u64 data)
2922{
2923 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2924
2925 return dd->send_pio_err_status_cnt[16];
2926}
2927
2928static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2929 const struct cntr_entry *entry,
2930 void *context, int vl, int mode, u64 data)
2931{
2932 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2933
2934 return dd->send_pio_err_status_cnt[15];
2935}
2936
2937static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2938 const struct cntr_entry *entry,
2939 void *context, int vl, int mode, u64 data)
2940{
2941 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2942
2943 return dd->send_pio_err_status_cnt[14];
2944}
2945
2946static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2947 const struct cntr_entry *entry,
2948 void *context, int vl, int mode, u64 data)
2949{
2950 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2951
2952 return dd->send_pio_err_status_cnt[13];
2953}
2954
2955static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2956 const struct cntr_entry *entry,
2957 void *context, int vl, int mode, u64 data)
2958{
2959 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2960
2961 return dd->send_pio_err_status_cnt[12];
2962}
2963
2964static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2965 const struct cntr_entry *entry,
2966 void *context, int vl, int mode, u64 data)
2967{
2968 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2969
2970 return dd->send_pio_err_status_cnt[11];
2971}
2972
2973static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2974 const struct cntr_entry *entry,
2975 void *context, int vl, int mode, u64 data)
2976{
2977 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2978
2979 return dd->send_pio_err_status_cnt[10];
2980}
2981
2982static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2983 const struct cntr_entry *entry,
2984 void *context, int vl, int mode, u64 data)
2985{
2986 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2987
2988 return dd->send_pio_err_status_cnt[9];
2989}
2990
2991static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2992 const struct cntr_entry *entry,
2993 void *context, int vl, int mode, u64 data)
2994{
2995 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2996
2997 return dd->send_pio_err_status_cnt[8];
2998}
2999
3000static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3001 const struct cntr_entry *entry,
3002 void *context, int vl, int mode, u64 data)
3003{
3004 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3005
3006 return dd->send_pio_err_status_cnt[7];
3007}
3008
3009static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3010 void *context, int vl, int mode,
3011 u64 data)
3012{
3013 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3014
3015 return dd->send_pio_err_status_cnt[6];
3016}
3017
3018static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3019 void *context, int vl, int mode,
3020 u64 data)
3021{
3022 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3023
3024 return dd->send_pio_err_status_cnt[5];
3025}
3026
3027static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3028 void *context, int vl, int mode,
3029 u64 data)
3030{
3031 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3032
3033 return dd->send_pio_err_status_cnt[4];
3034}
3035
3036static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3037 void *context, int vl, int mode,
3038 u64 data)
3039{
3040 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3041
3042 return dd->send_pio_err_status_cnt[3];
3043}
3044
3045static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3046 void *context, int vl, int mode,
3047 u64 data)
3048{
3049 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3050
3051 return dd->send_pio_err_status_cnt[2];
3052}
3053
3054static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3055 void *context, int vl,
3056 int mode, u64 data)
3057{
3058 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3059
3060 return dd->send_pio_err_status_cnt[1];
3061}
3062
3063static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3064 void *context, int vl, int mode,
3065 u64 data)
3066{
3067 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3068
3069 return dd->send_pio_err_status_cnt[0];
3070}
3071
3072/*
3073 * Software counters corresponding to each of the
3074 * error status bits within SendDmaErrStatus
3075 */
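/*
 * Note: the index into dd->send_dma_err_status_cnt[] selects the shadow
 * counter for one error bit; the accessors below run from index 3
 * (PCIe request tracking correctable error) down to index 0 (replay
 * tag error), apparently mirroring the bit layout of the register.
 */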
3076static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3077 const struct cntr_entry *entry,
3078 void *context, int vl, int mode, u64 data)
3079{
3080 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3081
3082 return dd->send_dma_err_status_cnt[3];
3083}
3084
3085static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3086 const struct cntr_entry *entry,
3087 void *context, int vl, int mode, u64 data)
3088{
3089 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3090
3091 return dd->send_dma_err_status_cnt[2];
3092}
3093
3094static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3095 void *context, int vl, int mode,
3096 u64 data)
3097{
3098 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3099
3100 return dd->send_dma_err_status_cnt[1];
3101}
3102
3103static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3104 void *context, int vl, int mode,
3105 u64 data)
3106{
3107 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3108
3109 return dd->send_dma_err_status_cnt[0];
3110}
3111
3112/*
3113 * Software counters corresponding to each of the
3114 * error status bits within SendEgressErrStatus
3115 */
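/*
 * SendEgressErrStatus covers a full 64-bit error register: the shadow
 * counters below run from index 63 down to index 0 and include entries
 * for the reserved bit positions (the access_egress_reserved_*_err_cnt
 * accessors), so every bit position has a counter even if it never fires.
 */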
3116static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3117 const struct cntr_entry *entry,
3118 void *context, int vl, int mode, u64 data)
3119{
3120 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3121
3122 return dd->send_egress_err_status_cnt[63];
3123}
3124
3125static u64 access_tx_read_sdma_memory_csr_err_cnt(
3126 const struct cntr_entry *entry,
3127 void *context, int vl, int mode, u64 data)
3128{
3129 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3130
3131 return dd->send_egress_err_status_cnt[62];
3132}
3133
3134static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3135 void *context, int vl, int mode,
3136 u64 data)
3137{
3138 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3139
3140 return dd->send_egress_err_status_cnt[61];
3141}
3142
3143static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3144 void *context, int vl,
3145 int mode, u64 data)
3146{
3147 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3148
3149 return dd->send_egress_err_status_cnt[60];
3150}
3151
3152static u64 access_tx_read_sdma_memory_cor_err_cnt(
3153 const struct cntr_entry *entry,
3154 void *context, int vl, int mode, u64 data)
3155{
3156 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3157
3158 return dd->send_egress_err_status_cnt[59];
3159}
3160
3161static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3162 void *context, int vl, int mode,
3163 u64 data)
3164{
3165 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3166
3167 return dd->send_egress_err_status_cnt[58];
3168}
3169
3170static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3171 void *context, int vl, int mode,
3172 u64 data)
3173{
3174 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3175
3176 return dd->send_egress_err_status_cnt[57];
3177}
3178
3179static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3180 void *context, int vl, int mode,
3181 u64 data)
3182{
3183 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3184
3185 return dd->send_egress_err_status_cnt[56];
3186}
3187
3188static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3189 void *context, int vl, int mode,
3190 u64 data)
3191{
3192 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3193
3194 return dd->send_egress_err_status_cnt[55];
3195}
3196
3197static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3198 void *context, int vl, int mode,
3199 u64 data)
3200{
3201 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3202
3203 return dd->send_egress_err_status_cnt[54];
3204}
3205
3206static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3207 void *context, int vl, int mode,
3208 u64 data)
3209{
3210 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3211
3212 return dd->send_egress_err_status_cnt[53];
3213}
3214
3215static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3216 void *context, int vl, int mode,
3217 u64 data)
3218{
3219 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3220
3221 return dd->send_egress_err_status_cnt[52];
3222}
3223
3224static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3225 void *context, int vl, int mode,
3226 u64 data)
3227{
3228 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3229
3230 return dd->send_egress_err_status_cnt[51];
3231}
3232
3233static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3234 void *context, int vl, int mode,
3235 u64 data)
3236{
3237 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3238
3239 return dd->send_egress_err_status_cnt[50];
3240}
3241
3242static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3243 void *context, int vl, int mode,
3244 u64 data)
3245{
3246 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3247
3248 return dd->send_egress_err_status_cnt[49];
3249}
3250
3251static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3252 void *context, int vl, int mode,
3253 u64 data)
3254{
3255 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3256
3257 return dd->send_egress_err_status_cnt[48];
3258}
3259
3260static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3261 void *context, int vl, int mode,
3262 u64 data)
3263{
3264 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3265
3266 return dd->send_egress_err_status_cnt[47];
3267}
3268
3269static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3270 void *context, int vl, int mode,
3271 u64 data)
3272{
3273 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3274
3275 return dd->send_egress_err_status_cnt[46];
3276}
3277
3278static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3279 void *context, int vl, int mode,
3280 u64 data)
3281{
3282 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3283
3284 return dd->send_egress_err_status_cnt[45];
3285}
3286
3287static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3288 void *context, int vl,
3289 int mode, u64 data)
3290{
3291 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3292
3293 return dd->send_egress_err_status_cnt[44];
3294}
3295
3296static u64 access_tx_read_sdma_memory_unc_err_cnt(
3297 const struct cntr_entry *entry,
3298 void *context, int vl, int mode, u64 data)
3299{
3300 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3301
3302 return dd->send_egress_err_status_cnt[43];
3303}
3304
3305static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3306 void *context, int vl, int mode,
3307 u64 data)
3308{
3309 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3310
3311 return dd->send_egress_err_status_cnt[42];
3312}
3313
3314static u64 access_tx_credit_return_partiy_err_cnt(
3315 const struct cntr_entry *entry,
3316 void *context, int vl, int mode, u64 data)
3317{
3318 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3319
3320 return dd->send_egress_err_status_cnt[41];
3321}
3322
3323static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3324 const struct cntr_entry *entry,
3325 void *context, int vl, int mode, u64 data)
3326{
3327 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3328
3329 return dd->send_egress_err_status_cnt[40];
3330}
3331
3332static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3333 const struct cntr_entry *entry,
3334 void *context, int vl, int mode, u64 data)
3335{
3336 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3337
3338 return dd->send_egress_err_status_cnt[39];
3339}
3340
3341static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3342 const struct cntr_entry *entry,
3343 void *context, int vl, int mode, u64 data)
3344{
3345 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3346
3347 return dd->send_egress_err_status_cnt[38];
3348}
3349
3350static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3351 const struct cntr_entry *entry,
3352 void *context, int vl, int mode, u64 data)
3353{
3354 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3355
3356 return dd->send_egress_err_status_cnt[37];
3357}
3358
3359static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3360 const struct cntr_entry *entry,
3361 void *context, int vl, int mode, u64 data)
3362{
3363 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3364
3365 return dd->send_egress_err_status_cnt[36];
3366}
3367
3368static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3369 const struct cntr_entry *entry,
3370 void *context, int vl, int mode, u64 data)
3371{
3372 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3373
3374 return dd->send_egress_err_status_cnt[35];
3375}
3376
3377static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3378 const struct cntr_entry *entry,
3379 void *context, int vl, int mode, u64 data)
3380{
3381 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3382
3383 return dd->send_egress_err_status_cnt[34];
3384}
3385
3386static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3387 const struct cntr_entry *entry,
3388 void *context, int vl, int mode, u64 data)
3389{
3390 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3391
3392 return dd->send_egress_err_status_cnt[33];
3393}
3394
3395static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3396 const struct cntr_entry *entry,
3397 void *context, int vl, int mode, u64 data)
3398{
3399 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3400
3401 return dd->send_egress_err_status_cnt[32];
3402}
3403
3404static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3405 const struct cntr_entry *entry,
3406 void *context, int vl, int mode, u64 data)
3407{
3408 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3409
3410 return dd->send_egress_err_status_cnt[31];
3411}
3412
3413static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3414 const struct cntr_entry *entry,
3415 void *context, int vl, int mode, u64 data)
3416{
3417 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3418
3419 return dd->send_egress_err_status_cnt[30];
3420}
3421
3422static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3423 const struct cntr_entry *entry,
3424 void *context, int vl, int mode, u64 data)
3425{
3426 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3427
3428 return dd->send_egress_err_status_cnt[29];
3429}
3430
3431static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3432 const struct cntr_entry *entry,
3433 void *context, int vl, int mode, u64 data)
3434{
3435 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3436
3437 return dd->send_egress_err_status_cnt[28];
3438}
3439
3440static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3441 const struct cntr_entry *entry,
3442 void *context, int vl, int mode, u64 data)
3443{
3444 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3445
3446 return dd->send_egress_err_status_cnt[27];
3447}
3448
3449static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3450 const struct cntr_entry *entry,
3451 void *context, int vl, int mode, u64 data)
3452{
3453 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3454
3455 return dd->send_egress_err_status_cnt[26];
3456}
3457
3458static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3459 const struct cntr_entry *entry,
3460 void *context, int vl, int mode, u64 data)
3461{
3462 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3463
3464 return dd->send_egress_err_status_cnt[25];
3465}
3466
3467static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3468 const struct cntr_entry *entry,
3469 void *context, int vl, int mode, u64 data)
3470{
3471 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3472
3473 return dd->send_egress_err_status_cnt[24];
3474}
3475
3476static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3477 const struct cntr_entry *entry,
3478 void *context, int vl, int mode, u64 data)
3479{
3480 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3481
3482 return dd->send_egress_err_status_cnt[23];
3483}
3484
3485static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3486 const struct cntr_entry *entry,
3487 void *context, int vl, int mode, u64 data)
3488{
3489 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3490
3491 return dd->send_egress_err_status_cnt[22];
3492}
3493
3494static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3495 const struct cntr_entry *entry,
3496 void *context, int vl, int mode, u64 data)
3497{
3498 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3499
3500 return dd->send_egress_err_status_cnt[21];
3501}
3502
3503static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3504 const struct cntr_entry *entry,
3505 void *context, int vl, int mode, u64 data)
3506{
3507 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3508
3509 return dd->send_egress_err_status_cnt[20];
3510}
3511
3512static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3513 const struct cntr_entry *entry,
3514 void *context, int vl, int mode, u64 data)
3515{
3516 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3517
3518 return dd->send_egress_err_status_cnt[19];
3519}
3520
3521static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3522 const struct cntr_entry *entry,
3523 void *context, int vl, int mode, u64 data)
3524{
3525 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3526
3527 return dd->send_egress_err_status_cnt[18];
3528}
3529
3530static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3531 const struct cntr_entry *entry,
3532 void *context, int vl, int mode, u64 data)
3533{
3534 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3535
3536 return dd->send_egress_err_status_cnt[17];
3537}
3538
3539static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3540 const struct cntr_entry *entry,
3541 void *context, int vl, int mode, u64 data)
3542{
3543 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3544
3545 return dd->send_egress_err_status_cnt[16];
3546}
3547
3548static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3549 void *context, int vl, int mode,
3550 u64 data)
3551{
3552 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3553
3554 return dd->send_egress_err_status_cnt[15];
3555}
3556
3557static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3558 void *context, int vl,
3559 int mode, u64 data)
3560{
3561 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3562
3563 return dd->send_egress_err_status_cnt[14];
3564}
3565
3566static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3567 void *context, int vl, int mode,
3568 u64 data)
3569{
3570 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3571
3572 return dd->send_egress_err_status_cnt[13];
3573}
3574
3575static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3576 void *context, int vl, int mode,
3577 u64 data)
3578{
3579 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3580
3581 return dd->send_egress_err_status_cnt[12];
3582}
3583
3584static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3585 const struct cntr_entry *entry,
3586 void *context, int vl, int mode, u64 data)
3587{
3588 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3589
3590 return dd->send_egress_err_status_cnt[11];
3591}
3592
3593static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3594 void *context, int vl, int mode,
3595 u64 data)
3596{
3597 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3598
3599 return dd->send_egress_err_status_cnt[10];
3600}
3601
3602static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3603 void *context, int vl, int mode,
3604 u64 data)
3605{
3606 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3607
3608 return dd->send_egress_err_status_cnt[9];
3609}
3610
3611static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3612 const struct cntr_entry *entry,
3613 void *context, int vl, int mode, u64 data)
3614{
3615 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3616
3617 return dd->send_egress_err_status_cnt[8];
3618}
3619
3620static u64 access_tx_pio_launch_intf_parity_err_cnt(
3621 const struct cntr_entry *entry,
3622 void *context, int vl, int mode, u64 data)
3623{
3624 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3625
3626 return dd->send_egress_err_status_cnt[7];
3627}
3628
3629static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3630 void *context, int vl, int mode,
3631 u64 data)
3632{
3633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3634
3635 return dd->send_egress_err_status_cnt[6];
3636}
3637
3638static u64 access_tx_incorrect_link_state_err_cnt(
3639 const struct cntr_entry *entry,
3640 void *context, int vl, int mode, u64 data)
3641{
3642 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3643
3644 return dd->send_egress_err_status_cnt[5];
3645}
3646
3647static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3648 void *context, int vl, int mode,
3649 u64 data)
3650{
3651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3652
3653 return dd->send_egress_err_status_cnt[4];
3654}
3655
3656static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3657 const struct cntr_entry *entry,
3658 void *context, int vl, int mode, u64 data)
3659{
3660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3661
3662 return dd->send_egress_err_status_cnt[3];
3663}
3664
3665static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3666 void *context, int vl, int mode,
3667 u64 data)
3668{
3669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3670
3671 return dd->send_egress_err_status_cnt[2];
3672}
3673
3674static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3675 const struct cntr_entry *entry,
3676 void *context, int vl, int mode, u64 data)
3677{
3678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3679
3680 return dd->send_egress_err_status_cnt[1];
3681}
3682
3683static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3684 const struct cntr_entry *entry,
3685 void *context, int vl, int mode, u64 data)
3686{
3687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3688
3689 return dd->send_egress_err_status_cnt[0];
3690}
3691
3692/*
3693 * Software counters corresponding to each of the
3694 * error status bits within SendErrStatus
3695 */
3696static u64 access_send_csr_write_bad_addr_err_cnt(
3697 const struct cntr_entry *entry,
3698 void *context, int vl, int mode, u64 data)
3699{
3700 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3701
3702 return dd->send_err_status_cnt[2];
3703}
3704
3705static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3706 void *context, int vl,
3707 int mode, u64 data)
3708{
3709 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3710
3711 return dd->send_err_status_cnt[1];
3712}
3713
3714static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3715 void *context, int vl, int mode,
3716 u64 data)
3717{
3718 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3719
3720 return dd->send_err_status_cnt[0];
3721}
3722
3723/*
3724 * Software counters corresponding to each of the
3725 * error status bits within SendCtxtErrStatus
3726 */
3727static u64 access_pio_write_out_of_bounds_err_cnt(
3728 const struct cntr_entry *entry,
3729 void *context, int vl, int mode, u64 data)
3730{
3731 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3732
3733 return dd->sw_ctxt_err_status_cnt[4];
3734}
3735
3736static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3737 void *context, int vl, int mode,
3738 u64 data)
3739{
3740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3741
3742 return dd->sw_ctxt_err_status_cnt[3];
3743}
3744
3745static u64 access_pio_write_crosses_boundary_err_cnt(
3746 const struct cntr_entry *entry,
3747 void *context, int vl, int mode, u64 data)
3748{
3749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3750
3751 return dd->sw_ctxt_err_status_cnt[2];
3752}
3753
3754static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3755 void *context, int vl,
3756 int mode, u64 data)
3757{
3758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3759
3760 return dd->sw_ctxt_err_status_cnt[1];
3761}
3762
3763static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3764 void *context, int vl, int mode,
3765 u64 data)
3766{
3767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3768
3769 return dd->sw_ctxt_err_status_cnt[0];
3770}
3771
3772/*
3773 * Software counters corresponding to each of the
3774 * error status bits within SendDmaEngErrStatus
3775 */
3776static u64 access_sdma_header_request_fifo_cor_err_cnt(
3777 const struct cntr_entry *entry,
3778 void *context, int vl, int mode, u64 data)
3779{
3780 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3781
3782 return dd->sw_send_dma_eng_err_status_cnt[23];
3783}
3784
3785static u64 access_sdma_header_storage_cor_err_cnt(
3786 const struct cntr_entry *entry,
3787 void *context, int vl, int mode, u64 data)
3788{
3789 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3790
3791 return dd->sw_send_dma_eng_err_status_cnt[22];
3792}
3793
3794static u64 access_sdma_packet_tracking_cor_err_cnt(
3795 const struct cntr_entry *entry,
3796 void *context, int vl, int mode, u64 data)
3797{
3798 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3799
3800 return dd->sw_send_dma_eng_err_status_cnt[21];
3801}
3802
3803static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3804 void *context, int vl, int mode,
3805 u64 data)
3806{
3807 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3808
3809 return dd->sw_send_dma_eng_err_status_cnt[20];
3810}
3811
3812static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3813 void *context, int vl, int mode,
3814 u64 data)
3815{
3816 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3817
3818 return dd->sw_send_dma_eng_err_status_cnt[19];
3819}
3820
3821static u64 access_sdma_header_request_fifo_unc_err_cnt(
3822 const struct cntr_entry *entry,
3823 void *context, int vl, int mode, u64 data)
3824{
3825 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3826
3827 return dd->sw_send_dma_eng_err_status_cnt[18];
3828}
3829
3830static u64 access_sdma_header_storage_unc_err_cnt(
3831 const struct cntr_entry *entry,
3832 void *context, int vl, int mode, u64 data)
3833{
3834 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3835
3836 return dd->sw_send_dma_eng_err_status_cnt[17];
3837}
3838
3839static u64 access_sdma_packet_tracking_unc_err_cnt(
3840 const struct cntr_entry *entry,
3841 void *context, int vl, int mode, u64 data)
3842{
3843 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3844
3845 return dd->sw_send_dma_eng_err_status_cnt[16];
3846}
3847
3848static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3849 void *context, int vl, int mode,
3850 u64 data)
3851{
3852 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3853
3854 return dd->sw_send_dma_eng_err_status_cnt[15];
3855}
3856
3857static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3858 void *context, int vl, int mode,
3859 u64 data)
3860{
3861 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3862
3863 return dd->sw_send_dma_eng_err_status_cnt[14];
3864}
3865
3866static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3867 void *context, int vl, int mode,
3868 u64 data)
3869{
3870 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3871
3872 return dd->sw_send_dma_eng_err_status_cnt[13];
3873}
3874
3875static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3876 void *context, int vl, int mode,
3877 u64 data)
3878{
3879 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3880
3881 return dd->sw_send_dma_eng_err_status_cnt[12];
3882}
3883
3884static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3885 void *context, int vl, int mode,
3886 u64 data)
3887{
3888 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3889
3890 return dd->sw_send_dma_eng_err_status_cnt[11];
3891}
3892
3893static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3894 void *context, int vl, int mode,
3895 u64 data)
3896{
3897 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3898
3899 return dd->sw_send_dma_eng_err_status_cnt[10];
3900}
3901
3902static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3903 void *context, int vl, int mode,
3904 u64 data)
3905{
3906 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3907
3908 return dd->sw_send_dma_eng_err_status_cnt[9];
3909}
3910
3911static u64 access_sdma_packet_desc_overflow_err_cnt(
3912 const struct cntr_entry *entry,
3913 void *context, int vl, int mode, u64 data)
3914{
3915 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3916
3917 return dd->sw_send_dma_eng_err_status_cnt[8];
3918}
3919
3920static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3921 void *context, int vl,
3922 int mode, u64 data)
3923{
3924 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3925
3926 return dd->sw_send_dma_eng_err_status_cnt[7];
3927}
3928
3929static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3930 void *context, int vl, int mode, u64 data)
3931{
3932 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3933
3934 return dd->sw_send_dma_eng_err_status_cnt[6];
3935}
3936
3937static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3938 void *context, int vl, int mode,
3939 u64 data)
3940{
3941 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3942
3943 return dd->sw_send_dma_eng_err_status_cnt[5];
3944}
3945
3946static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3947 void *context, int vl, int mode,
3948 u64 data)
3949{
3950 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3951
3952 return dd->sw_send_dma_eng_err_status_cnt[4];
3953}
3954
3955static u64 access_sdma_tail_out_of_bounds_err_cnt(
3956 const struct cntr_entry *entry,
3957 void *context, int vl, int mode, u64 data)
3958{
3959 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3960
3961 return dd->sw_send_dma_eng_err_status_cnt[3];
3962}
3963
3964static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3965 void *context, int vl, int mode,
3966 u64 data)
3967{
3968 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3969
3970 return dd->sw_send_dma_eng_err_status_cnt[2];
3971}
3972
3973static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3974 void *context, int vl, int mode,
3975 u64 data)
3976{
3977 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3978
3979 return dd->sw_send_dma_eng_err_status_cnt[1];
3980}
3981
3982static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3983 void *context, int vl, int mode,
3984 u64 data)
3985{
3986 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3987
3988 return dd->sw_send_dma_eng_err_status_cnt[0];
3989}
3990
3991static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
3992 void *context, int vl, int mode,
3993 u64 data)
3994{
3995 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3996
3997 u64 val = 0;
3998 u64 csr = entry->csr;
3999
4000 val = read_write_csr(dd, csr, mode, data);
4001 if (mode == CNTR_MODE_R) {
4002 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4003 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4004 } else if (mode == CNTR_MODE_W) {
4005 dd->sw_rcv_bypass_packet_errors = 0;
4006 } else {
4007 dd_dev_err(dd, "Invalid cntr register access mode");
4008 return 0;
4009 }
4010 return val;
4011}
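/*
 * DcRecvErr combines the hardware DCC_ERR_PORTRCV_ERR_CNT value with the
 * software count of dropped bypass packets: a read returns the sum,
 * clamped at CNTR_MAX, and a write (counter clear) also zeroes the
 * software contribution in dd->sw_rcv_bypass_packet_errors.
 */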
4012
4013#define def_access_sw_cpu(cntr) \
4014static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
4015 void *context, int vl, int mode, u64 data) \
4016{ \
4017 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
4018 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
4019 ppd->ibport_data.rvp.cntr, vl, \
4020 mode, data); \
4021}
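/*
 * Expansion example: def_access_sw_cpu(rc_acks) defines
 * access_sw_cpu_rc_acks(), which passes the per-CPU counter
 * ppd->ibport_data.rvp.rc_acks and its companion z_rc_acks value,
 * along with the vl/mode/data arguments, to read_write_cpu().
 */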
4022
4023def_access_sw_cpu(rc_acks);
4024def_access_sw_cpu(rc_qacks);
4025def_access_sw_cpu(rc_delayed_comp);
4026
4027#define def_access_ibp_counter(cntr) \
4028static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
4029 void *context, int vl, int mode, u64 data) \
4030{ \
4031 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
4032 \
4033 if (vl != CNTR_INVALID_VL) \
4034 return 0; \
4035 \
4036 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
4037 mode, data); \
4038}
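/*
 * Expansion example: def_access_ibp_counter(rc_resends) defines
 * access_ibp_rc_resends(), which returns 0 for any per-VL query
 * (vl != CNTR_INVALID_VL) and otherwise lets read_write_sw() read or
 * clear ppd->ibport_data.rvp.n_rc_resends.
 */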
4039
4040def_access_ibp_counter(loop_pkts);
4041def_access_ibp_counter(rc_resends);
4042def_access_ibp_counter(rnr_naks);
4043def_access_ibp_counter(other_naks);
4044def_access_ibp_counter(rc_timeouts);
4045def_access_ibp_counter(pkt_drops);
4046def_access_ibp_counter(dmawait);
4047def_access_ibp_counter(rc_seqnak);
4048def_access_ibp_counter(rc_dupreq);
4049def_access_ibp_counter(rdma_seq);
4050def_access_ibp_counter(unaligned);
4051def_access_ibp_counter(seq_naks);
4052
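/*
 * Device counter table, indexed by the C_* device counter enums used in
 * the designated initializers below.  Each entry gives the counter's
 * display name, the CSR backing it (0 for purely software counters),
 * its flags (CNTR_NORMAL, CNTR_SYNTH, CNTR_VL, ...), and the accessor
 * invoked to read or clear it.
 */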
4053static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4054[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4055[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4056 CNTR_NORMAL),
4057[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4058 CNTR_NORMAL),
4059[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4060 RCV_TID_FLOW_GEN_MISMATCH_CNT,
4061 CNTR_NORMAL),
4062[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4063 CNTR_NORMAL),
4064[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4065 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4066[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4067 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4068[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4069 CNTR_NORMAL),
4070[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4071 CNTR_NORMAL),
4072[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4073 CNTR_NORMAL),
4074[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4075 CNTR_NORMAL),
4076[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4077 CNTR_NORMAL),
4078[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4079 CNTR_NORMAL),
4080[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4081 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4082[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4083 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4084[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4085 CNTR_SYNTH),
4086[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4087 access_dc_rcv_err_cnt),
4088[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4089 CNTR_SYNTH),
4090[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4091 CNTR_SYNTH),
4092[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4093 CNTR_SYNTH),
4094[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4095 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4096[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4097 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4098 CNTR_SYNTH),
4099[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4100 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4101[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4102 CNTR_SYNTH),
4103[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4104 CNTR_SYNTH),
4105[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4106 CNTR_SYNTH),
4107[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4108 CNTR_SYNTH),
4109[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4110 CNTR_SYNTH),
4111[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4112 CNTR_SYNTH),
4113[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4114 CNTR_SYNTH),
4115[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4116 CNTR_SYNTH | CNTR_VL),
4117[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4118 CNTR_SYNTH | CNTR_VL),
4119[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4120[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4121 CNTR_SYNTH | CNTR_VL),
4122[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4123[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4124 CNTR_SYNTH | CNTR_VL),
4125[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4126 CNTR_SYNTH),
4127[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4128 CNTR_SYNTH | CNTR_VL),
4129[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4130 CNTR_SYNTH),
4131[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4132 CNTR_SYNTH | CNTR_VL),
4133[C_DC_TOTAL_CRC] =
4134 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4135 CNTR_SYNTH),
4136[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4137 CNTR_SYNTH),
4138[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4139 CNTR_SYNTH),
4140[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4141 CNTR_SYNTH),
4142[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4143 CNTR_SYNTH),
4144[C_DC_CRC_MULT_LN] =
4145 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4146 CNTR_SYNTH),
4147[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4148 CNTR_SYNTH),
4149[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4150 CNTR_SYNTH),
4151[C_DC_SEQ_CRC_CNT] =
4152 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4153 CNTR_SYNTH),
4154[C_DC_ESC0_ONLY_CNT] =
4155 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4156 CNTR_SYNTH),
4157[C_DC_ESC0_PLUS1_CNT] =
4158 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4159 CNTR_SYNTH),
4160[C_DC_ESC0_PLUS2_CNT] =
4161 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4162 CNTR_SYNTH),
4163[C_DC_REINIT_FROM_PEER_CNT] =
4164 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4165 CNTR_SYNTH),
4166[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4167 CNTR_SYNTH),
4168[C_DC_MISC_FLG_CNT] =
4169 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4170 CNTR_SYNTH),
4171[C_DC_PRF_GOOD_LTP_CNT] =
4172 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4173[C_DC_PRF_ACCEPTED_LTP_CNT] =
4174 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4175 CNTR_SYNTH),
4176[C_DC_PRF_RX_FLIT_CNT] =
4177 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4178[C_DC_PRF_TX_FLIT_CNT] =
4179 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4180[C_DC_PRF_CLK_CNTR] =
4181 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4182[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4183 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4184[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4185 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4186 CNTR_SYNTH),
4187[C_DC_PG_STS_TX_SBE_CNT] =
4188 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4189[C_DC_PG_STS_TX_MBE_CNT] =
4190 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4191 CNTR_SYNTH),
4192[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4193 access_sw_cpu_intr),
4194[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4195 access_sw_cpu_rcv_limit),
4196[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4197 access_sw_vtx_wait),
4198[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4199 access_sw_pio_wait),
4200[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4201 access_sw_pio_drain),
4202[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4203 access_sw_kmem_wait),
4204[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4205 access_sw_send_schedule),
4206[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4207 SEND_DMA_DESC_FETCHED_CNT, 0,
4208 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4209 dev_access_u32_csr),
4210[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4211 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4212 access_sde_int_cnt),
4213[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4214 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4215 access_sde_err_cnt),
4216[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4217 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4218 access_sde_idle_int_cnt),
4219[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4220 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4221 access_sde_progress_int_cnt),
4222/* MISC_ERR_STATUS */
4223[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4224 CNTR_NORMAL,
4225 access_misc_pll_lock_fail_err_cnt),
4226[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4227 CNTR_NORMAL,
4228 access_misc_mbist_fail_err_cnt),
4229[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4230 CNTR_NORMAL,
4231 access_misc_invalid_eep_cmd_err_cnt),
4232[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4233 CNTR_NORMAL,
4234 access_misc_efuse_done_parity_err_cnt),
4235[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4236 CNTR_NORMAL,
4237 access_misc_efuse_write_err_cnt),
4238[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4239 0, CNTR_NORMAL,
4240 access_misc_efuse_read_bad_addr_err_cnt),
4241[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4242 CNTR_NORMAL,
4243 access_misc_efuse_csr_parity_err_cnt),
4244[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4245 CNTR_NORMAL,
4246 access_misc_fw_auth_failed_err_cnt),
4247[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4248 CNTR_NORMAL,
4249 access_misc_key_mismatch_err_cnt),
4250[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4251 CNTR_NORMAL,
4252 access_misc_sbus_write_failed_err_cnt),
4253[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4254 CNTR_NORMAL,
4255 access_misc_csr_write_bad_addr_err_cnt),
4256[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4257 CNTR_NORMAL,
4258 access_misc_csr_read_bad_addr_err_cnt),
4259[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4260 CNTR_NORMAL,
4261 access_misc_csr_parity_err_cnt),
4262/* CceErrStatus */
4263[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4264 CNTR_NORMAL,
4265 access_sw_cce_err_status_aggregated_cnt),
4266[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4267 CNTR_NORMAL,
4268 access_cce_msix_csr_parity_err_cnt),
4269[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4270 CNTR_NORMAL,
4271 access_cce_int_map_unc_err_cnt),
4272[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4273 CNTR_NORMAL,
4274 access_cce_int_map_cor_err_cnt),
4275[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4276 CNTR_NORMAL,
4277 access_cce_msix_table_unc_err_cnt),
4278[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4279 CNTR_NORMAL,
4280 access_cce_msix_table_cor_err_cnt),
4281[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4282 0, CNTR_NORMAL,
4283 access_cce_rxdma_conv_fifo_parity_err_cnt),
4284[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4285 0, CNTR_NORMAL,
4286 access_cce_rcpl_async_fifo_parity_err_cnt),
4287[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4288 CNTR_NORMAL,
4289 access_cce_seg_write_bad_addr_err_cnt),
4290[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4291 CNTR_NORMAL,
4292 access_cce_seg_read_bad_addr_err_cnt),
4293[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4294 CNTR_NORMAL,
4295 access_la_triggered_cnt),
4296[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4297 CNTR_NORMAL,
4298 access_cce_trgt_cpl_timeout_err_cnt),
4299[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4300 CNTR_NORMAL,
4301 access_pcic_receive_parity_err_cnt),
4302[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4303 CNTR_NORMAL,
4304 access_pcic_transmit_back_parity_err_cnt),
4305[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4306 0, CNTR_NORMAL,
4307 access_pcic_transmit_front_parity_err_cnt),
4308[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4309 CNTR_NORMAL,
4310 access_pcic_cpl_dat_q_unc_err_cnt),
4311[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4312 CNTR_NORMAL,
4313 access_pcic_cpl_hd_q_unc_err_cnt),
4314[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4315 CNTR_NORMAL,
4316 access_pcic_post_dat_q_unc_err_cnt),
4317[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4318 CNTR_NORMAL,
4319 access_pcic_post_hd_q_unc_err_cnt),
4320[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4321 CNTR_NORMAL,
4322 access_pcic_retry_sot_mem_unc_err_cnt),
4323[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4324 CNTR_NORMAL,
4325 access_pcic_retry_mem_unc_err),
4326[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4327 CNTR_NORMAL,
4328 access_pcic_n_post_dat_q_parity_err_cnt),
4329[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4330 CNTR_NORMAL,
4331 access_pcic_n_post_h_q_parity_err_cnt),
4332[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4333 CNTR_NORMAL,
4334 access_pcic_cpl_dat_q_cor_err_cnt),
4335[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4336 CNTR_NORMAL,
4337 access_pcic_cpl_hd_q_cor_err_cnt),
4338[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4339 CNTR_NORMAL,
4340 access_pcic_post_dat_q_cor_err_cnt),
4341[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4342 CNTR_NORMAL,
4343 access_pcic_post_hd_q_cor_err_cnt),
4344[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4345 CNTR_NORMAL,
4346 access_pcic_retry_sot_mem_cor_err_cnt),
4347[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4348 CNTR_NORMAL,
4349 access_pcic_retry_mem_cor_err_cnt),
4350[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4351 "CceCli1AsyncFifoDbgParityError", 0, 0,
4352 CNTR_NORMAL,
4353 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4354[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4355 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4356 CNTR_NORMAL,
4357 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4358 ),
4359[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4360 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4361 CNTR_NORMAL,
4362 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4363[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4364 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4365 CNTR_NORMAL,
4366 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4367[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4368 0, CNTR_NORMAL,
4369 access_cce_cli2_async_fifo_parity_err_cnt),
4370[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4371 CNTR_NORMAL,
4372 access_cce_csr_cfg_bus_parity_err_cnt),
4373[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4374 0, CNTR_NORMAL,
4375 access_cce_cli0_async_fifo_parity_err_cnt),
4376[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4377 CNTR_NORMAL,
4378 access_cce_rspd_data_parity_err_cnt),
4379[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4380 CNTR_NORMAL,
4381 access_cce_trgt_access_err_cnt),
4382[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4383 0, CNTR_NORMAL,
4384 access_cce_trgt_async_fifo_parity_err_cnt),
4385[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4386 CNTR_NORMAL,
4387 access_cce_csr_write_bad_addr_err_cnt),
4388[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4389 CNTR_NORMAL,
4390 access_cce_csr_read_bad_addr_err_cnt),
4391[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4392 CNTR_NORMAL,
4393 access_ccs_csr_parity_err_cnt),
4394
4395/* RcvErrStatus */
4396[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4397 CNTR_NORMAL,
4398 access_rx_csr_parity_err_cnt),
4399[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4400 CNTR_NORMAL,
4401 access_rx_csr_write_bad_addr_err_cnt),
4402[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4403 CNTR_NORMAL,
4404 access_rx_csr_read_bad_addr_err_cnt),
4405[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4406 CNTR_NORMAL,
4407 access_rx_dma_csr_unc_err_cnt),
4408[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4409 CNTR_NORMAL,
4410 access_rx_dma_dq_fsm_encoding_err_cnt),
4411[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4412 CNTR_NORMAL,
4413 access_rx_dma_eq_fsm_encoding_err_cnt),
4414[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4415 CNTR_NORMAL,
4416 access_rx_dma_csr_parity_err_cnt),
4417[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4418 CNTR_NORMAL,
4419 access_rx_rbuf_data_cor_err_cnt),
4420[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4421 CNTR_NORMAL,
4422 access_rx_rbuf_data_unc_err_cnt),
4423[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4424 CNTR_NORMAL,
4425 access_rx_dma_data_fifo_rd_cor_err_cnt),
4426[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4427 CNTR_NORMAL,
4428 access_rx_dma_data_fifo_rd_unc_err_cnt),
4429[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4430 CNTR_NORMAL,
4431 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4432[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4433 CNTR_NORMAL,
4434 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4435[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4436 CNTR_NORMAL,
4437 access_rx_rbuf_desc_part2_cor_err_cnt),
4438[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4439 CNTR_NORMAL,
4440 access_rx_rbuf_desc_part2_unc_err_cnt),
4441[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4442 CNTR_NORMAL,
4443 access_rx_rbuf_desc_part1_cor_err_cnt),
4444[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4445 CNTR_NORMAL,
4446 access_rx_rbuf_desc_part1_unc_err_cnt),
4447[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4448 CNTR_NORMAL,
4449 access_rx_hq_intr_fsm_err_cnt),
4450[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4451 CNTR_NORMAL,
4452 access_rx_hq_intr_csr_parity_err_cnt),
4453[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4454 CNTR_NORMAL,
4455 access_rx_lookup_csr_parity_err_cnt),
4456[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4457 CNTR_NORMAL,
4458 access_rx_lookup_rcv_array_cor_err_cnt),
4459[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4460 CNTR_NORMAL,
4461 access_rx_lookup_rcv_array_unc_err_cnt),
4462[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4463 0, CNTR_NORMAL,
4464 access_rx_lookup_des_part2_parity_err_cnt),
4465[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4466 0, CNTR_NORMAL,
4467 access_rx_lookup_des_part1_unc_cor_err_cnt),
4468[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4469 CNTR_NORMAL,
4470 access_rx_lookup_des_part1_unc_err_cnt),
4471[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4472 CNTR_NORMAL,
4473 access_rx_rbuf_next_free_buf_cor_err_cnt),
4474[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4475 CNTR_NORMAL,
4476 access_rx_rbuf_next_free_buf_unc_err_cnt),
4477[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4478 "RxRbufFlInitWrAddrParityErr", 0, 0,
4479 CNTR_NORMAL,
4480 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4481[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4482 0, CNTR_NORMAL,
4483 access_rx_rbuf_fl_initdone_parity_err_cnt),
4484[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4485 0, CNTR_NORMAL,
4486 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4487[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4488 CNTR_NORMAL,
4489 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4490[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4491 CNTR_NORMAL,
4492 access_rx_rbuf_empty_err_cnt),
4493[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4494 CNTR_NORMAL,
4495 access_rx_rbuf_full_err_cnt),
4496[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4497 CNTR_NORMAL,
4498 access_rbuf_bad_lookup_err_cnt),
4499[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4500 CNTR_NORMAL,
4501 access_rbuf_ctx_id_parity_err_cnt),
4502[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4503 CNTR_NORMAL,
4504 access_rbuf_csr_qeopdw_parity_err_cnt),
4505[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4506 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4507 CNTR_NORMAL,
4508 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4509[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4510 "RxRbufCsrQTlPtrParityErr", 0, 0,
4511 CNTR_NORMAL,
4512 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4513[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4514 0, CNTR_NORMAL,
4515 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4516[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4517 0, CNTR_NORMAL,
4518 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4519[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4520 0, 0, CNTR_NORMAL,
4521 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4522[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4523 0, CNTR_NORMAL,
4524 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4525[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4526 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4527 CNTR_NORMAL,
4528 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4529[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4530 0, CNTR_NORMAL,
4531 access_rx_rbuf_block_list_read_cor_err_cnt),
4532[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4533 0, CNTR_NORMAL,
4534 access_rx_rbuf_block_list_read_unc_err_cnt),
4535[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4536 CNTR_NORMAL,
4537 access_rx_rbuf_lookup_des_cor_err_cnt),
4538[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4539 CNTR_NORMAL,
4540 access_rx_rbuf_lookup_des_unc_err_cnt),
4541[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4542 "RxRbufLookupDesRegUncCorErr", 0, 0,
4543 CNTR_NORMAL,
4544 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4545[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4546 CNTR_NORMAL,
4547 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4548[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4549 CNTR_NORMAL,
4550 access_rx_rbuf_free_list_cor_err_cnt),
4551[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4552 CNTR_NORMAL,
4553 access_rx_rbuf_free_list_unc_err_cnt),
4554[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4555 CNTR_NORMAL,
4556 access_rx_rcv_fsm_encoding_err_cnt),
4557[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4558 CNTR_NORMAL,
4559 access_rx_dma_flag_cor_err_cnt),
4560[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4561 CNTR_NORMAL,
4562 access_rx_dma_flag_unc_err_cnt),
4563[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4564 CNTR_NORMAL,
4565 access_rx_dc_sop_eop_parity_err_cnt),
4566[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4567 CNTR_NORMAL,
4568 access_rx_rcv_csr_parity_err_cnt),
4569[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4570 CNTR_NORMAL,
4571 access_rx_rcv_qp_map_table_cor_err_cnt),
4572[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4573 CNTR_NORMAL,
4574 access_rx_rcv_qp_map_table_unc_err_cnt),
4575[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4576 CNTR_NORMAL,
4577 access_rx_rcv_data_cor_err_cnt),
4578[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4579 CNTR_NORMAL,
4580 access_rx_rcv_data_unc_err_cnt),
4581[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4582 CNTR_NORMAL,
4583 access_rx_rcv_hdr_cor_err_cnt),
4584[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4585 CNTR_NORMAL,
4586 access_rx_rcv_hdr_unc_err_cnt),
4587[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4588 CNTR_NORMAL,
4589 access_rx_dc_intf_parity_err_cnt),
4590[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4591 CNTR_NORMAL,
4592 access_rx_dma_csr_cor_err_cnt),
4593/* SendPioErrStatus */
4594[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4595 CNTR_NORMAL,
4596 access_pio_pec_sop_head_parity_err_cnt),
4597[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4598 CNTR_NORMAL,
4599 access_pio_pcc_sop_head_parity_err_cnt),
4600[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4601 0, 0, CNTR_NORMAL,
4602 access_pio_last_returned_cnt_parity_err_cnt),
4603[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4604 0, CNTR_NORMAL,
4605 access_pio_current_free_cnt_parity_err_cnt),
4606[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4607 CNTR_NORMAL,
4608 access_pio_reserved_31_err_cnt),
4609[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4610 CNTR_NORMAL,
4611 access_pio_reserved_30_err_cnt),
4612[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4613 CNTR_NORMAL,
4614 access_pio_ppmc_sop_len_err_cnt),
4615[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4616 CNTR_NORMAL,
4617 access_pio_ppmc_bqc_mem_parity_err_cnt),
4618[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4619 CNTR_NORMAL,
4620 access_pio_vl_fifo_parity_err_cnt),
4621[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4622 CNTR_NORMAL,
4623 access_pio_vlf_sop_parity_err_cnt),
4624[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4625 CNTR_NORMAL,
4626 access_pio_vlf_v1_len_parity_err_cnt),
4627[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4628 CNTR_NORMAL,
4629 access_pio_block_qw_count_parity_err_cnt),
4630[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4631 CNTR_NORMAL,
4632 access_pio_write_qw_valid_parity_err_cnt),
4633[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4634 CNTR_NORMAL,
4635 access_pio_state_machine_err_cnt),
4636[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4637 CNTR_NORMAL,
4638 access_pio_write_data_parity_err_cnt),
4639[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4640 CNTR_NORMAL,
4641 access_pio_host_addr_mem_cor_err_cnt),
4642[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4643 CNTR_NORMAL,
4644 access_pio_host_addr_mem_unc_err_cnt),
4645[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4646 CNTR_NORMAL,
4647 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4648[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4649 CNTR_NORMAL,
4650 access_pio_init_sm_in_err_cnt),
4651[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4652 CNTR_NORMAL,
4653 access_pio_ppmc_pbl_fifo_err_cnt),
4654[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4655 0, CNTR_NORMAL,
4656 access_pio_credit_ret_fifo_parity_err_cnt),
4657[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4658 CNTR_NORMAL,
4659 access_pio_v1_len_mem_bank1_cor_err_cnt),
4660[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4661 CNTR_NORMAL,
4662 access_pio_v1_len_mem_bank0_cor_err_cnt),
4663[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4664 CNTR_NORMAL,
4665 access_pio_v1_len_mem_bank1_unc_err_cnt),
4666[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4667 CNTR_NORMAL,
4668 access_pio_v1_len_mem_bank0_unc_err_cnt),
4669[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4670 CNTR_NORMAL,
4671 access_pio_sm_pkt_reset_parity_err_cnt),
4672[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4673 CNTR_NORMAL,
4674 access_pio_pkt_evict_fifo_parity_err_cnt),
4675[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4676 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4677 CNTR_NORMAL,
4678 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4679[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4680 CNTR_NORMAL,
4681 access_pio_sbrdctl_crrel_parity_err_cnt),
4682[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4683 CNTR_NORMAL,
4684 access_pio_pec_fifo_parity_err_cnt),
4685[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4686 CNTR_NORMAL,
4687 access_pio_pcc_fifo_parity_err_cnt),
4688[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4689 CNTR_NORMAL,
4690 access_pio_sb_mem_fifo1_err_cnt),
4691[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4692 CNTR_NORMAL,
4693 access_pio_sb_mem_fifo0_err_cnt),
4694[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4695 CNTR_NORMAL,
4696 access_pio_csr_parity_err_cnt),
4697[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4698 CNTR_NORMAL,
4699 access_pio_write_addr_parity_err_cnt),
4700[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4701 CNTR_NORMAL,
4702 access_pio_write_bad_ctxt_err_cnt),
4703/* SendDmaErrStatus */
4704[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4705 0, CNTR_NORMAL,
4706 access_sdma_pcie_req_tracking_cor_err_cnt),
4707[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4708 0, CNTR_NORMAL,
4709 access_sdma_pcie_req_tracking_unc_err_cnt),
4710[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4711 CNTR_NORMAL,
4712 access_sdma_csr_parity_err_cnt),
4713[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4714 CNTR_NORMAL,
4715 access_sdma_rpy_tag_err_cnt),
4716/* SendEgressErrStatus */
4717[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4718 CNTR_NORMAL,
4719 access_tx_read_pio_memory_csr_unc_err_cnt),
4720[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4721 0, CNTR_NORMAL,
4722 access_tx_read_sdma_memory_csr_err_cnt),
4723[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4724 CNTR_NORMAL,
4725 access_tx_egress_fifo_cor_err_cnt),
4726[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4727 CNTR_NORMAL,
4728 access_tx_read_pio_memory_cor_err_cnt),
4729[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4730 CNTR_NORMAL,
4731 access_tx_read_sdma_memory_cor_err_cnt),
4732[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4733 CNTR_NORMAL,
4734 access_tx_sb_hdr_cor_err_cnt),
4735[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4736 CNTR_NORMAL,
4737 access_tx_credit_overrun_err_cnt),
4738[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4739 CNTR_NORMAL,
4740 access_tx_launch_fifo8_cor_err_cnt),
4741[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4742 CNTR_NORMAL,
4743 access_tx_launch_fifo7_cor_err_cnt),
4744[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4745 CNTR_NORMAL,
4746 access_tx_launch_fifo6_cor_err_cnt),
4747[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4748 CNTR_NORMAL,
4749 access_tx_launch_fifo5_cor_err_cnt),
4750[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4751 CNTR_NORMAL,
4752 access_tx_launch_fifo4_cor_err_cnt),
4753[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4754 CNTR_NORMAL,
4755 access_tx_launch_fifo3_cor_err_cnt),
4756[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4757 CNTR_NORMAL,
4758 access_tx_launch_fifo2_cor_err_cnt),
4759[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4760 CNTR_NORMAL,
4761 access_tx_launch_fifo1_cor_err_cnt),
4762[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4763 CNTR_NORMAL,
4764 access_tx_launch_fifo0_cor_err_cnt),
4765[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4766 CNTR_NORMAL,
4767 access_tx_credit_return_vl_err_cnt),
4768[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4769 CNTR_NORMAL,
4770 access_tx_hcrc_insertion_err_cnt),
4771[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4772 CNTR_NORMAL,
4773 access_tx_egress_fifo_unc_err_cnt),
4774[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4775 CNTR_NORMAL,
4776 access_tx_read_pio_memory_unc_err_cnt),
4777[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4778 CNTR_NORMAL,
4779 access_tx_read_sdma_memory_unc_err_cnt),
4780[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4781 CNTR_NORMAL,
4782 access_tx_sb_hdr_unc_err_cnt),
4783[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4784 CNTR_NORMAL,
4785 access_tx_credit_return_partiy_err_cnt),
4786[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4787 0, 0, CNTR_NORMAL,
4788 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4789[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4790 0, 0, CNTR_NORMAL,
4791 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4792[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4793 0, 0, CNTR_NORMAL,
4794 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4795[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4796 0, 0, CNTR_NORMAL,
4797 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4798[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4799 0, 0, CNTR_NORMAL,
4800 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4801[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4802 0, 0, CNTR_NORMAL,
4803 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4804[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4805 0, 0, CNTR_NORMAL,
4806 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4807[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4808 0, 0, CNTR_NORMAL,
4809 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4810[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4811 0, 0, CNTR_NORMAL,
4812 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4813[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4814 0, 0, CNTR_NORMAL,
4815 access_tx_sdma15_disallowed_packet_err_cnt),
4816[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4817 0, 0, CNTR_NORMAL,
4818 access_tx_sdma14_disallowed_packet_err_cnt),
4819[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4820 0, 0, CNTR_NORMAL,
4821 access_tx_sdma13_disallowed_packet_err_cnt),
4822[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4823 0, 0, CNTR_NORMAL,
4824 access_tx_sdma12_disallowed_packet_err_cnt),
4825[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4826 0, 0, CNTR_NORMAL,
4827 access_tx_sdma11_disallowed_packet_err_cnt),
4828[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4829 0, 0, CNTR_NORMAL,
4830 access_tx_sdma10_disallowed_packet_err_cnt),
4831[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4832 0, 0, CNTR_NORMAL,
4833 access_tx_sdma9_disallowed_packet_err_cnt),
4834[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4835 0, 0, CNTR_NORMAL,
4836 access_tx_sdma8_disallowed_packet_err_cnt),
4837[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4838 0, 0, CNTR_NORMAL,
4839 access_tx_sdma7_disallowed_packet_err_cnt),
4840[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4841 0, 0, CNTR_NORMAL,
4842 access_tx_sdma6_disallowed_packet_err_cnt),
4843[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4844 0, 0, CNTR_NORMAL,
4845 access_tx_sdma5_disallowed_packet_err_cnt),
4846[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4847 0, 0, CNTR_NORMAL,
4848 access_tx_sdma4_disallowed_packet_err_cnt),
4849[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4850 0, 0, CNTR_NORMAL,
4851 access_tx_sdma3_disallowed_packet_err_cnt),
4852[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4853 0, 0, CNTR_NORMAL,
4854 access_tx_sdma2_disallowed_packet_err_cnt),
4855[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4856 0, 0, CNTR_NORMAL,
4857 access_tx_sdma1_disallowed_packet_err_cnt),
4858[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4859 0, 0, CNTR_NORMAL,
4860 access_tx_sdma0_disallowed_packet_err_cnt),
4861[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4862 CNTR_NORMAL,
4863 access_tx_config_parity_err_cnt),
4864[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4865 CNTR_NORMAL,
4866 access_tx_sbrd_ctl_csr_parity_err_cnt),
4867[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4868 CNTR_NORMAL,
4869 access_tx_launch_csr_parity_err_cnt),
4870[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4871 CNTR_NORMAL,
4872 access_tx_illegal_vl_err_cnt),
4873[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4874 "TxSbrdCtlStateMachineParityErr", 0, 0,
4875 CNTR_NORMAL,
4876 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4877[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4878 CNTR_NORMAL,
4879 access_egress_reserved_10_err_cnt),
4880[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4881 CNTR_NORMAL,
4882 access_egress_reserved_9_err_cnt),
4883[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4884 0, 0, CNTR_NORMAL,
4885 access_tx_sdma_launch_intf_parity_err_cnt),
4886[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4887 CNTR_NORMAL,
4888 access_tx_pio_launch_intf_parity_err_cnt),
4889[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4890 CNTR_NORMAL,
4891 access_egress_reserved_6_err_cnt),
4892[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4893 CNTR_NORMAL,
4894 access_tx_incorrect_link_state_err_cnt),
4895[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4896 CNTR_NORMAL,
4897 access_tx_linkdown_err_cnt),
4898[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4899 "EgressFifoUnderrunOrParityErr", 0, 0,
4900 CNTR_NORMAL,
4901 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4902[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4903 CNTR_NORMAL,
4904 access_egress_reserved_2_err_cnt),
4905[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4906 CNTR_NORMAL,
4907 access_tx_pkt_integrity_mem_unc_err_cnt),
4908[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4909 CNTR_NORMAL,
4910 access_tx_pkt_integrity_mem_cor_err_cnt),
4911/* SendErrStatus */
4912[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4913 CNTR_NORMAL,
4914 access_send_csr_write_bad_addr_err_cnt),
4915[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4916 CNTR_NORMAL,
4917 access_send_csr_read_bad_addr_err_cnt),
4918[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4919 CNTR_NORMAL,
4920 access_send_csr_parity_cnt),
4921/* SendCtxtErrStatus */
4922[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4923 CNTR_NORMAL,
4924 access_pio_write_out_of_bounds_err_cnt),
4925[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4926 CNTR_NORMAL,
4927 access_pio_write_overflow_err_cnt),
4928[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4929 0, 0, CNTR_NORMAL,
4930 access_pio_write_crosses_boundary_err_cnt),
4931[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4932 CNTR_NORMAL,
4933 access_pio_disallowed_packet_err_cnt),
4934[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4935 CNTR_NORMAL,
4936 access_pio_inconsistent_sop_err_cnt),
4937/* SendDmaEngErrStatus */
4938[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4939 0, 0, CNTR_NORMAL,
4940 access_sdma_header_request_fifo_cor_err_cnt),
4941[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4942 CNTR_NORMAL,
4943 access_sdma_header_storage_cor_err_cnt),
4944[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4945 CNTR_NORMAL,
4946 access_sdma_packet_tracking_cor_err_cnt),
4947[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4948 CNTR_NORMAL,
4949 access_sdma_assembly_cor_err_cnt),
4950[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4951 CNTR_NORMAL,
4952 access_sdma_desc_table_cor_err_cnt),
4953[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4954 0, 0, CNTR_NORMAL,
4955 access_sdma_header_request_fifo_unc_err_cnt),
4956[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4957 CNTR_NORMAL,
4958 access_sdma_header_storage_unc_err_cnt),
4959[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4960 CNTR_NORMAL,
4961 access_sdma_packet_tracking_unc_err_cnt),
4962[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4963 CNTR_NORMAL,
4964 access_sdma_assembly_unc_err_cnt),
4965[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4966 CNTR_NORMAL,
4967 access_sdma_desc_table_unc_err_cnt),
4968[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4969 CNTR_NORMAL,
4970 access_sdma_timeout_err_cnt),
4971[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4972 CNTR_NORMAL,
4973 access_sdma_header_length_err_cnt),
4974[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4975 CNTR_NORMAL,
4976 access_sdma_header_address_err_cnt),
4977[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4978 CNTR_NORMAL,
4979 access_sdma_header_select_err_cnt),
4980[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4981 CNTR_NORMAL,
4982 access_sdma_reserved_9_err_cnt),
4983[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4984 CNTR_NORMAL,
4985 access_sdma_packet_desc_overflow_err_cnt),
4986[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4987 CNTR_NORMAL,
4988 access_sdma_length_mismatch_err_cnt),
4989[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4990 CNTR_NORMAL,
4991 access_sdma_halt_err_cnt),
4992[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4993 CNTR_NORMAL,
4994 access_sdma_mem_read_err_cnt),
4995[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4996 CNTR_NORMAL,
4997 access_sdma_first_desc_err_cnt),
4998[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4999 CNTR_NORMAL,
5000 access_sdma_tail_out_of_bounds_err_cnt),
5001[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5002 CNTR_NORMAL,
5003 access_sdma_too_long_err_cnt),
5004[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5005 CNTR_NORMAL,
5006 access_sdma_gen_mismatch_err_cnt),
5007[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5008 CNTR_NORMAL,
5009 access_sdma_wrong_dw_err_cnt),
5010};
5011
5012static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5013[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5014 CNTR_NORMAL),
5015[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5016 CNTR_NORMAL),
5017[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5018 CNTR_NORMAL),
5019[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5020 CNTR_NORMAL),
5021[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5022 CNTR_NORMAL),
5023[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5024 CNTR_NORMAL),
5025[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5026 CNTR_NORMAL),
5027[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5028[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5029[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5030[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5031 CNTR_SYNTH | CNTR_VL),
5032[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5033 CNTR_SYNTH | CNTR_VL),
5034[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5035 CNTR_SYNTH | CNTR_VL),
5036[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5037[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5038[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5039 access_sw_link_dn_cnt),
5040[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5041 access_sw_link_up_cnt),
5042[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5043 access_sw_unknown_frame_cnt),
5044[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5045 access_sw_xmit_discards),
5046[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5047 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5048 access_sw_xmit_discards),
5049[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5050 access_xmit_constraint_errs),
5051[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5052 access_rcv_constraint_errs),
5053[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5054[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5055[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5056[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5057[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5058[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5059[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5060[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5061[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5062[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5063[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5064[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5065[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5066 access_sw_cpu_rc_acks),
5067[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5068 access_sw_cpu_rc_qacks),
5069[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5070 access_sw_cpu_rc_delayed_comp),
5071[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5072[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5073[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5074[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5075[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5076[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5077[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5078[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5079[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5080[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5081[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5082[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5083[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5084[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5085[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5086[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5087[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5088[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5089[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5090[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5091[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5092[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5093[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5094[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5095[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5096[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5097[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5098[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5099[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5100[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5101[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5102[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5103[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5104[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5105[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5106[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5107[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5108[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5109[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5110[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5111[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5112[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5113[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5114[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5115[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5116[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5117[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5118[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5119[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5120[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5121[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5122[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5123[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5124[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5125[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5126[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5127[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5128[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5129[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5130[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5131[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5132[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5133[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5134[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5135[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5136[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5137[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5138[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5139[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5140[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5141[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5142[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5143[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5144[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5145[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5146[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5147[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5148[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5149[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5150[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5151};
5152
5153/* ======================================================================== */
5154
5155/* return true if this is chip revision a */
5156int is_ax(struct hfi1_devdata *dd)
5157{
5158 u8 chip_rev_minor =
5159 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5160 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5161 return (chip_rev_minor & 0xf0) == 0;
5162}
5163
5164/* return true if this is chip revision b */
5165int is_bx(struct hfi1_devdata *dd)
5166{
5167 u8 chip_rev_minor =
5168 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5169 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5170 return (chip_rev_minor & 0xF0) == 0x10;
5171}
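/*
 * Both helpers look only at the upper nibble of the ChipRevMinor field:
 * 0x0 identifies A-step silicon and 0x1 identifies B-step silicon; the
 * lower nibble is ignored and does not affect the result.
 */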
5172
5173/*
5174 * Append string s to buffer buf. Arguments curp and len are the current
5175 * position and remaining length, respectively.
5176 *
5177 * return 0 on success, 1 on out of room
5178 */
5179static int append_str(char *buf, char **curp, int *lenp, const char *s)
5180{
5181 char *p = *curp;
5182 int len = *lenp;
5183 int result = 0; /* success */
5184 char c;
5185
5186 /* add a comma, if not first in the buffer */
5187 if (p != buf) {
5188 if (len == 0) {
5189 result = 1; /* out of room */
5190 goto done;
5191 }
5192 *p++ = ',';
5193 len--;
5194 }
5195
5196 /* copy the string */
5197 while ((c = *s++) != 0) {
5198 if (len == 0) {
5199 result = 1; /* out of room */
5200 goto done;
5201 }
5202 *p++ = c;
5203 len--;
5204 }
5205
5206done:
5207 /* write return values */
5208 *curp = p;
5209 *lenp = len;
5210
5211 return result;
5212}
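/*
 * Illustrative walk-through: with 8 bytes of room left, appending "Foo"
 * and then "Bar" yields "Foo,Bar" with *lenp == 1; a third call with
 * "Baz" writes the separating comma, runs out of room on the first
 * character, and returns 1 so the caller can flag the truncation.
 */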
5213
5214/*
5215 * Using the given flag table, print a comma separated string into
5216 * the buffer. End in '*' if the buffer is too short.
5217 */
5218static char *flag_string(char *buf, int buf_len, u64 flags,
5219 struct flag_table *table, int table_size)
5220{
5221 char extra[32];
5222 char *p = buf;
5223 int len = buf_len;
5224 int no_room = 0;
5225 int i;
5226
5227 /* make sure there are at least 2 bytes so we can form "*" */
5228 if (len < 2)
5229 return "";
5230
5231 len--; /* leave room for a nul */
5232 for (i = 0; i < table_size; i++) {
5233 if (flags & table[i].flag) {
5234 no_room = append_str(buf, &p, &len, table[i].str);
5235 if (no_room)
5236 break;
5237 flags &= ~table[i].flag;
5238 }
5239 }
5240
5241 /* any undocumented bits left? */
5242 if (!no_room && flags) {
5243 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5244 no_room = append_str(buf, &p, &len, extra);
5245 }
5246
5247 /* add * if ran out of room */
5248 if (no_room) {
5249 /* may need to back up to add space for a '*' */
5250 if (len == 0)
5251 --p;
5252 *p++ = '*';
5253 }
5254
5255 /* add final nul - space already allocated above */
5256 *p = 0;
5257 return buf;
5258}
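/*
 * Example with a hypothetical two-entry table: if bit 0 maps to "FlagA"
 * and bit 3 maps to "FlagB", then flags == 0x9 formats as "FlagA,FlagB".
 * Set bits without a table entry are summarized as "bits 0x<value>", and
 * a trailing '*' marks output that had to be truncated to fit the buffer.
 */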
5259
5260/* first 8 CCE error interrupt source names */
5261static const char * const cce_misc_names[] = {
5262 "CceErrInt", /* 0 */
5263 "RxeErrInt", /* 1 */
5264 "MiscErrInt", /* 2 */
5265 "Reserved3", /* 3 */
5266 "PioErrInt", /* 4 */
5267 "SDmaErrInt", /* 5 */
5268 "EgressErrInt", /* 6 */
5269 "TxeErrInt" /* 7 */
5270};
5271
5272/*
5273 * Return the miscellaneous error interrupt name.
5274 */
5275static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5276{
5277 if (source < ARRAY_SIZE(cce_misc_names))
5278 strncpy(buf, cce_misc_names[source], bsize);
5279 else
5280 snprintf(buf, bsize, "Reserved%u",
5281 source + IS_GENERAL_ERR_START);
5282
5283 return buf;
5284}
5285
5286/*
5287 * Return the SDMA engine error interrupt name.
5288 */
5289static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5290{
5291 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5292 return buf;
5293}
5294
5295/*
5296 * Return the send context error interrupt name.
5297 */
5298static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5299{
5300 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5301 return buf;
5302}
5303
5304static const char * const various_names[] = {
5305 "PbcInt",
5306 "GpioAssertInt",
5307 "Qsfp1Int",
5308 "Qsfp2Int",
5309 "TCritInt"
5310};
5311
5312/*
5313 * Return the various interrupt name.
5314 */
5315static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5316{
5317 if (source < ARRAY_SIZE(various_names))
5318 strncpy(buf, various_names[source], bsize);
5319 else
5320 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5321 return buf;
5322}
5323
5324/*
5325 * Return the DC interrupt name.
5326 */
5327static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5328{
5329 static const char * const dc_int_names[] = {
5330 "common",
5331 "lcb",
5332 "8051",
5333 "lbm" /* local block merge */
5334 };
5335
5336 if (source < ARRAY_SIZE(dc_int_names))
5337 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5338 else
5339 snprintf(buf, bsize, "DCInt%u", source);
5340 return buf;
5341}
5342
5343static const char * const sdma_int_names[] = {
5344 "SDmaInt",
5345 "SdmaIdleInt",
5346 "SdmaProgressInt",
5347};
5348
5349/*
5350 * Return the SDMA engine interrupt name.
5351 */
5352static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5353{
5354 /* what interrupt */
5355 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5356 /* which engine */
5357 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5358
5359 if (likely(what < 3))
5360 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5361 else
5362 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5363 return buf;
5364}
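/*
 * The SDMA sources are grouped by interrupt type: with
 * TXE_NUM_SDMA_ENGINES == 16, sources 0-15 are SDmaInt0-15, 16-31 are
 * SdmaIdleInt0-15 and 32-47 are SdmaProgressInt0-15, so source 17
 * decodes to "SdmaIdleInt1".
 */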
5365
5366/*
5367 * Return the receive available interrupt name.
5368 */
5369static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5370{
5371 snprintf(buf, bsize, "RcvAvailInt%u", source);
5372 return buf;
5373}
5374
5375/*
5376 * Return the receive urgent interrupt name.
5377 */
5378static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5379{
5380 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5381 return buf;
5382}
5383
5384/*
5385 * Return the send credit interrupt name.
5386 */
5387static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5388{
5389 snprintf(buf, bsize, "SendCreditInt%u", source);
5390 return buf;
5391}
5392
5393/*
5394 * Return the reserved interrupt name.
5395 */
5396static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5397{
5398 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5399 return buf;
5400}
5401
5402static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5403{
5404 return flag_string(buf, buf_len, flags,
5405 cce_err_status_flags,
5406 ARRAY_SIZE(cce_err_status_flags));
5407}
5408
5409static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5410{
5411 return flag_string(buf, buf_len, flags,
5412 rxe_err_status_flags,
5413 ARRAY_SIZE(rxe_err_status_flags));
5414}
5415
5416static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5417{
5418 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5419 ARRAY_SIZE(misc_err_status_flags));
5420}
5421
5422static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5423{
5424 return flag_string(buf, buf_len, flags,
5425 pio_err_status_flags,
5426 ARRAY_SIZE(pio_err_status_flags));
5427}
5428
5429static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5430{
5431 return flag_string(buf, buf_len, flags,
5432 sdma_err_status_flags,
5433 ARRAY_SIZE(sdma_err_status_flags));
5434}
5435
5436static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5437{
5438 return flag_string(buf, buf_len, flags,
5439 egress_err_status_flags,
5440 ARRAY_SIZE(egress_err_status_flags));
5441}
5442
5443static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5444{
5445 return flag_string(buf, buf_len, flags,
5446 egress_err_info_flags,
5447 ARRAY_SIZE(egress_err_info_flags));
5448}
5449
5450static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5451{
5452 return flag_string(buf, buf_len, flags,
5453 send_err_status_flags,
5454 ARRAY_SIZE(send_err_status_flags));
5455}
5456
5457static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5458{
5459 char buf[96];
5460 int i = 0;
5461
5462 /*
5463 * For most of these errors, there is nothing that can be done except
5464 * report or record it.
5465 */
5466 dd_dev_info(dd, "CCE Error: %s\n",
5467 cce_err_status_string(buf, sizeof(buf), reg));
5468
5469 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5470 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5471 /* this error requires a manual drop into SPC freeze mode */
5472 /* then a fix up */
5473 start_freeze_handling(dd->pport, FREEZE_SELF);
5474 }
5475
5476 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5477 if (reg & (1ull << i)) {
5478 incr_cntr64(&dd->cce_err_status_cnt[i]);
5479 /* maintain a counter over all cce_err_status errors */
5480 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5481 }
5482 }
5483}
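/*
 * For example, a status value of 0x5 increments cce_err_status_cnt[0]
 * and cce_err_status_cnt[2] and adds two to the aggregate; the handlers
 * below apply the same per-bit counting pattern to their own status
 * registers.
 */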
5484
5485/*
5486 * Check counters for receive errors that do not have an interrupt
5487 * associated with them.
5488 */
5489#define RCVERR_CHECK_TIME 10
5490static void update_rcverr_timer(unsigned long opaque)
5491{
5492 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5493 struct hfi1_pportdata *ppd = dd->pport;
5494 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5495
5496 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5497 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5498 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5499 set_link_down_reason(
5500 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5501 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5502 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5503 }
5504 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5505
5506 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5507}
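/*
 * The timer rearms itself, so the C_RCV_OVF counter is sampled roughly
 * every RCVERR_CHECK_TIME (10) seconds; a link bounce is requested only
 * when the counter advanced since the previous sample and the port error
 * action mask asks for it.
 */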
5508
5509static int init_rcverr(struct hfi1_devdata *dd)
5510{
5511 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5512 /* Assume the hardware counter has been reset */
5513 dd->rcv_ovfl_cnt = 0;
5514 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5515}
5516
5517static void free_rcverr(struct hfi1_devdata *dd)
5518{
5519 if (dd->rcverr_timer.data)
5520 del_timer_sync(&dd->rcverr_timer);
5521 dd->rcverr_timer.data = 0;
5522}
5523
5524static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5525{
5526 char buf[96];
5527 int i = 0;
5528
5529 dd_dev_info(dd, "Receive Error: %s\n",
5530 rxe_err_status_string(buf, sizeof(buf), reg));
5531
5532 if (reg & ALL_RXE_FREEZE_ERR) {
5533 int flags = 0;
5534
5535 /*
5536 * Freeze mode recovery is disabled for the errors
5537 * in RXE_FREEZE_ABORT_MASK
5538 */
5539 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5540 flags = FREEZE_ABORT;
5541
5542 start_freeze_handling(dd->pport, flags);
5543 }
5544
5545 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5546 if (reg & (1ull << i))
5547 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5548 }
5549}
5550
5551static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5552{
5553 char buf[96];
5554 int i = 0;
5555
5556 dd_dev_info(dd, "Misc Error: %s",
5557 misc_err_status_string(buf, sizeof(buf), reg));
5558 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5559 if (reg & (1ull << i))
5560 incr_cntr64(&dd->misc_err_status_cnt[i]);
5561 }
5562}
5563
5564static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5565{
5566 char buf[96];
5567 int i = 0;
5568
5569 dd_dev_info(dd, "PIO Error: %s\n",
5570 pio_err_status_string(buf, sizeof(buf), reg));
5571
5572 if (reg & ALL_PIO_FREEZE_ERR)
5573 start_freeze_handling(dd->pport, 0);
5574
5575 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5576 if (reg & (1ull << i))
5577 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5578 }
5579}
5580
5581static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5582{
5583 char buf[96];
5584 int i = 0;
5585
5586 dd_dev_info(dd, "SDMA Error: %s\n",
5587 sdma_err_status_string(buf, sizeof(buf), reg));
5588
5589 if (reg & ALL_SDMA_FREEZE_ERR)
5590 start_freeze_handling(dd->pport, 0);
5591
5592 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5593 if (reg & (1ull << i))
5594 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5595 }
5596}
5597
5598static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5599{
5600 incr_cntr64(&ppd->port_xmit_discards);
5601}
5602
5603static void count_port_inactive(struct hfi1_devdata *dd)
5604{
5605 __count_port_discards(dd->pport);
5606}
5607
5608/*
5609 * We have had a "disallowed packet" error during egress. Determine the
5610 * integrity check which failed, and update relevant error counter, etc.
5611 *
5612 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5613 * bit of state per integrity check, and so we can miss the reason for an
5614 * egress error if more than one packet fails the same integrity check
5615 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5616 */
5617static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5618 int vl)
5619{
5620 struct hfi1_pportdata *ppd = dd->pport;
5621 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5622 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5623 char buf[96];
5624
5625 /* clear down all observed info as quickly as possible after read */
5626 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5627
5628 dd_dev_info(dd,
5629 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5630 info, egress_err_info_string(buf, sizeof(buf), info), src);
5631
5632 /* Eventually add other counters for each bit */
5633 if (info & PORT_DISCARD_EGRESS_ERRS) {
5634 int weight, i;
5635
5636 /*
5637 * Count all applicable bits as individual errors and
5638 * attribute them to the packet that triggered this handler.
5639 * This may not be completely accurate due to limitations
5640 * on the available hardware error information. There is
5641 * a single information register and any number of error
5642 * packets may have occurred and contributed to it before
5643 * this routine is called. This means that:
5644 * a) If multiple packets with the same error occur before
5645 * this routine is called, earlier packets are missed.
5646 * There is only a single bit for each error type.
5647 * b) Errors may not be attributed to the correct VL.
5648 * The driver is attributing all bits in the info register
5649 * to the packet that triggered this call, but bits
5650 * could be an accumulation of different packets with
5651 * different VLs.
5652 * c) A single error packet may have multiple counts attached
5653 * to it. There is no way for the driver to know if
5654 * multiple bits set in the info register are due to a
5655 * single packet or multiple packets. The driver assumes
5656 * multiple packets.
5657 */
5658 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5659 for (i = 0; i < weight; i++) {
5660 __count_port_discards(ppd);
5661 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5662 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5663 else if (vl == 15)
5664 incr_cntr64(&ppd->port_xmit_discards_vl
5665 [C_VL_15]);
5666 }
5667 }
5668}
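/*
 * Concrete case of the attribution caveats above: if two discard bits
 * are set in SEND_EGRESS_ERR_INFO when this runs, weight is 2 and
 * port_xmit_discards is incremented twice, with both increments charged
 * to the single VL passed in (VL15 uses the C_VL_15 slot), even if the
 * underlying packets actually used different VLs.
 */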
5669
5670/*
5671 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5672 * register. Does it represent a 'port inactive' error?
5673 */
5674static inline int port_inactive_err(u64 posn)
5675{
5676 return (posn >= SEES(TX_LINKDOWN) &&
5677 posn <= SEES(TX_INCORRECT_LINK_STATE));
5678}
5679
5680/*
5681 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5682 * register. Does it represent a 'disallowed packet' error?
5683 */
5684static inline int disallowed_pkt_err(int posn)
5685{
5686 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5687 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5688}
5689
5690/*
5691 * Input value is a bit position of one of the SDMA engine disallowed
5692 * packet errors. Return which engine. Use of this must be guarded by
5693 * disallowed_pkt_err().
5694 */
5695static inline int disallowed_pkt_engine(int posn)
5696{
5697 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5698}
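/*
 * Example: the status bit for TX_SDMA3_DISALLOWED_PACKET yields engine 3
 * here, which engine_to_vl() below then maps to the VL currently bound
 * to that engine.
 */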
5699
5700/*
5701 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5702 * be done.
5703 */
5704static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5705{
5706 struct sdma_vl_map *m;
5707 int vl;
5708
5709 /* range check */
5710 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5711 return -1;
5712
5713 rcu_read_lock();
5714 m = rcu_dereference(dd->sdma_map);
5715 vl = m->engine_to_vl[engine];
5716 rcu_read_unlock();
5717
5718 return vl;
5719}
5720
5721/*
5722 * Translate the send context (software index) into a VL. Return -1 if the
5723 * translation cannot be done.
5724 */
5725static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5726{
5727 struct send_context_info *sci;
5728 struct send_context *sc;
5729 int i;
5730
5731 sci = &dd->send_contexts[sw_index];
5732
5733 /* there is no information for user (PSM) and ack contexts */
5734 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5735 return -1;
5736
5737 sc = sci->sc;
5738 if (!sc)
5739 return -1;
5740 if (dd->vld[15].sc == sc)
5741 return 15;
5742 for (i = 0; i < num_vls; i++)
5743 if (dd->vld[i].sc == sc)
5744 return i;
5745
5746 return -1;
5747}
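/*
 * Lookup order above: the VL15 send context is checked first, then the
 * per-VL kernel contexts 0..num_vls-1; user (PSM) and ack contexts
 * deliberately map to -1 since no VL can be attributed to them.
 */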
5748
5749static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5750{
5751 u64 reg_copy = reg, handled = 0;
5752 char buf[96];
5753 int i = 0;
5754
5755 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5756 start_freeze_handling(dd->pport, 0);
5757 else if (is_ax(dd) &&
5758 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5759 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5760 start_freeze_handling(dd->pport, 0);
5761
5762 while (reg_copy) {
5763 int posn = fls64(reg_copy);
5764 /* fls64() returns a 1-based offset, we want it zero based */
5765 int shift = posn - 1;
5766 u64 mask = 1ULL << shift;
5767
5768 if (port_inactive_err(shift)) {
5769 count_port_inactive(dd);
5770 handled |= mask;
5771 } else if (disallowed_pkt_err(shift)) {
5772 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5773
5774 handle_send_egress_err_info(dd, vl);
5775 handled |= mask;
5776 }
5777 reg_copy &= ~mask;
5778 }
5779
5780 reg &= ~handled;
5781
5782 if (reg)
5783 dd_dev_info(dd, "Egress Error: %s\n",
5784 egress_err_status_string(buf, sizeof(buf), reg));
5785
5786 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5787 if (reg & (1ull << i))
5788 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5789 }
5790}
5791
5792static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5793{
5794 char buf[96];
5795 int i = 0;
5796
5797 dd_dev_info(dd, "Send Error: %s\n",
5798 send_err_status_string(buf, sizeof(buf), reg));
5799
5800 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5801 if (reg & (1ull << i))
5802 incr_cntr64(&dd->send_err_status_cnt[i]);
5803 }
5804}
5805
5806/*
5807 * The maximum number of times the error clear down will loop before
5808 * blocking a repeating error. This value is arbitrary.
5809 */
5810#define MAX_CLEAR_COUNT 20
5811
5812/*
5813 * Clear and handle an error register. All error interrupts are funneled
5814 * through here to have a central location to correctly handle single-
5815 * or multi-shot errors.
5816 *
5817 * For non per-context registers, call this routine with a context value
5818 * of 0 so the per-context offset is zero.
5819 *
5820 * If the handler loops too many times, assume that something is wrong
5821 * and can't be fixed, so mask the error bits.
5822 */
5823static void interrupt_clear_down(struct hfi1_devdata *dd,
5824 u32 context,
5825 const struct err_reg_info *eri)
5826{
5827 u64 reg;
5828 u32 count;
5829
5830 /* read in a loop until no more errors are seen */
5831 count = 0;
5832 while (1) {
5833 reg = read_kctxt_csr(dd, context, eri->status);
5834 if (reg == 0)
5835 break;
5836 write_kctxt_csr(dd, context, eri->clear, reg);
5837 if (likely(eri->handler))
5838 eri->handler(dd, context, reg);
5839 count++;
5840 if (count > MAX_CLEAR_COUNT) {
5841 u64 mask;
5842
5843 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5844 eri->desc, reg);
5845 /*
5846 * Read-modify-write so any other masked bits
5847 * remain masked.
5848 */
5849 mask = read_kctxt_csr(dd, context, eri->mask);
5850 mask &= ~reg;
5851 write_kctxt_csr(dd, context, eri->mask, mask);
5852 break;
5853 }
5854 }
5855}
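/*
 * A minimal sketch of how an entry is expected to be wired up (the field
 * names come from the uses above; the real initializers live in the
 * err_reg_info tables earlier in this file, and the CSR names here are
 * only illustrative):
 *
 *	static const struct err_reg_info example_eri = {
 *		.desc    = "CceErr",
 *		.status  = CCE_ERR_STATUS,
 *		.clear   = CCE_ERR_CLEAR,
 *		.mask    = CCE_ERR_MASK,
 *		.handler = handle_cce_err,
 *	};
 *
 * interrupt_clear_down(dd, 0, &example_eri) would then repeatedly read
 * and clear CCE_ERR_STATUS, dispatch each non-zero value to
 * handle_cce_err(), and mask any bit still asserted after
 * MAX_CLEAR_COUNT passes.
 */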
5856
5857/*
5858 * CCE block "misc" interrupt. Source is < 16.
5859 */
5860static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5861{
5862 const struct err_reg_info *eri = &misc_errs[source];
5863
5864 if (eri->handler) {
5865 interrupt_clear_down(dd, 0, eri);
5866 } else {
5867 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5868 source);
5869 }
5870}
5871
5872static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5873{
5874 return flag_string(buf, buf_len, flags,
5875 sc_err_status_flags,
5876 ARRAY_SIZE(sc_err_status_flags));
5877}
5878
5879/*
5880 * Send context error interrupt. Source (hw_context) is < 160.
5881 *
5882 * All send context errors cause the send context to halt. The normal
5883 * clear-down mechanism cannot be used because we cannot clear the
5884 * error bits until several other long-running items are done first.
5885 * This is OK because with the context halted, nothing else is going
5886 * to happen on it anyway.
5887 */
5888static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5889 unsigned int hw_context)
5890{
5891 struct send_context_info *sci;
5892 struct send_context *sc;
5893 char flags[96];
5894 u64 status;
5895 u32 sw_index;
5896 int i = 0;
5897
5898 sw_index = dd->hw_to_sw[hw_context];
5899 if (sw_index >= dd->num_send_contexts) {
5900 dd_dev_err(dd,
5901 "out of range sw index %u for send context %u\n",
5902 sw_index, hw_context);
5903 return;
5904 }
5905 sci = &dd->send_contexts[sw_index];
5906 sc = sci->sc;
5907 if (!sc) {
5908 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5909 sw_index, hw_context);
5910 return;
5911 }
5912
5913 /* tell the software that a halt has begun */
5914 sc_stop(sc, SCF_HALTED);
5915
5916 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5917
5918 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5919 send_context_err_status_string(flags, sizeof(flags),
5920 status));
5921
5922 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5923 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5924
5925 /*
5926 * Automatically restart halted kernel contexts out of interrupt
5927 * context. User contexts must ask the driver to restart the context.
5928 */
5929 if (sc->type != SC_USER)
5930 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5931
5932 /*
5933 * Update the counters for the corresponding status bits.
5934 * Note that these particular counters are aggregated over all
5935 * 160 contexts.
5936 */
5937 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5938 if (status & (1ull << i))
5939 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5940 }
5941}
5942
5943static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5944 unsigned int source, u64 status)
5945{
5946 struct sdma_engine *sde;
5947 int i = 0;
5948
5949 sde = &dd->per_sdma[source];
5950#ifdef CONFIG_SDMA_VERBOSITY
5951 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5952 slashstrip(__FILE__), __LINE__, __func__);
5953 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5954 sde->this_idx, source, (unsigned long long)status);
5955#endif
5956 sde->err_cnt++;
5957 sdma_engine_error(sde, status);
5958
5959 /*
5960 * Update the counters for the corresponding status bits.
5961 * Note that these particular counters are aggregated over
5962 * all 16 DMA engines.
5963 */
5964 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5965 if (status & (1ull << i))
5966 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5967 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005968}
5969
5970/*
5971 * CCE block SDMA error interrupt. Source is < 16.
5972 */
5973static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5974{
5975#ifdef CONFIG_SDMA_VERBOSITY
5976 struct sdma_engine *sde = &dd->per_sdma[source];
5977
5978 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5979 slashstrip(__FILE__), __LINE__, __func__);
5980 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5981 source);
5982 sdma_dumpstate(sde);
5983#endif
5984 interrupt_clear_down(dd, source, &sdma_eng_err);
5985}
5986
5987/*
5988 * CCE block "various" interrupt. Source is < 8.
5989 */
5990static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5991{
5992 const struct err_reg_info *eri = &various_err[source];
5993
5994 /*
5995 * TCritInt cannot go through interrupt_clear_down()
5996 * because it is not a second tier interrupt. The handler
5997 * should be called directly.
5998 */
5999 if (source == TCRIT_INT_SOURCE)
6000 handle_temp_err(dd);
6001 else if (eri->handler)
6002 interrupt_clear_down(dd, 0, eri);
6003 else
6004 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006005 "%s: Unimplemented/reserved interrupt %d\n",
6006 __func__, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006007}
6008
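/*
 * QSFP interrupt handler: handles module removal/insertion (signalled
 * through the inverted ModPresent pin) and interrupts raised by the
 * module itself, then schedules the QSFP work item if a cable is
 * present.
 */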
6009static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6010{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006011 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006012 struct hfi1_pportdata *ppd = dd->pport;
6013 unsigned long flags;
6014 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6015
6016 if (reg & QSFP_HFI0_MODPRST_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006017 if (!qsfp_mod_present(ppd)) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006018 dd_dev_info(dd, "%s: QSFP module removed\n",
6019 __func__);
6020
Mike Marciniszyn77241052015-07-30 15:17:43 -04006021 ppd->driver_link_ready = 0;
6022 /*
6023 * Cable removed, reset all our information about the
6024 * cache and cable capabilities
6025 */
6026
6027 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6028 /*
6029 * We don't set cache_refresh_required here as we expect
6030 * an interrupt when a cable is inserted
6031 */
6032 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006033 ppd->qsfp_info.reset_needed = 0;
6034 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006035 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006036 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006037 /* Invert the ModPresent pin now to detect plug-in */
6038 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6039 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006040
6041 if ((ppd->offline_disabled_reason >
6042 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006043 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08006044 (ppd->offline_disabled_reason ==
6045 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6046 ppd->offline_disabled_reason =
6047 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006048 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006049
Mike Marciniszyn77241052015-07-30 15:17:43 -04006050 if (ppd->host_link_state == HLS_DN_POLL) {
6051 /*
6052 * The link is still in POLL. This means
6053 * that the normal link down processing
6054 * will not happen. We have to do it here
6055 * before turning the DC off.
6056 */
6057 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
6058 }
6059 } else {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006060 dd_dev_info(dd, "%s: QSFP module inserted\n",
6061 __func__);
6062
Mike Marciniszyn77241052015-07-30 15:17:43 -04006063 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6064 ppd->qsfp_info.cache_valid = 0;
6065 ppd->qsfp_info.cache_refresh_required = 1;
6066 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006067 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006068
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006069 /*
6070 * Stop inversion of ModPresent pin to detect
6071 * removal of the cable
6072 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006073 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006074 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6075 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6076
6077 ppd->offline_disabled_reason =
6078 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006079 }
6080 }
6081
6082 if (reg & QSFP_HFI0_INT_N) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006083 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006084 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006085 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6086 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006087 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6088 }
6089
6090 /* Schedule the QSFP work only if there is a cable attached. */
6091 if (qsfp_mod_present(ppd))
6092 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6093}
6094
6095static int request_host_lcb_access(struct hfi1_devdata *dd)
6096{
6097 int ret;
6098
6099 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006100 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6101 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006102 if (ret != HCMD_SUCCESS) {
6103 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006104 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006105 }
6106 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6107}
6108
6109static int request_8051_lcb_access(struct hfi1_devdata *dd)
6110{
6111 int ret;
6112
6113 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006114 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6115 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006116 if (ret != HCMD_SUCCESS) {
6117 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006118 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006119 }
6120 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6121}
6122
6123/*
6124 * Set the LCB selector - allow host access. The DCC selector always
6125 * points to the host.
6126 */
6127static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6128{
6129 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006130 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6131 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006132}
6133
6134/*
6135 * Clear the LCB selector - allow 8051 access. The DCC selector always
6136 * points to the host.
6137 */
6138static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6139{
6140 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006141 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006142}
6143
6144/*
6145 * Acquire LCB access from the 8051. If the host already has access,
6146 * just increment a counter. Otherwise, inform the 8051 that the
6147 * host is taking access.
6148 *
6149 * Returns:
6150 * 0 on success
6151 * -EBUSY if the 8051 has control and cannot be disturbed
6152 * -errno if unable to acquire access from the 8051
6153 */
6154int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6155{
6156 struct hfi1_pportdata *ppd = dd->pport;
6157 int ret = 0;
6158
6159 /*
6160 * Use the host link state lock so the operation of this routine
6161 * { link state check, selector change, count increment } can occur
6162 * as a unit against a link state change. Otherwise there is a
6163 * race between the state change and the count increment.
6164 */
6165 if (sleep_ok) {
6166 mutex_lock(&ppd->hls_lock);
6167 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006168 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006169 udelay(1);
6170 }
6171
6172 /* this access is valid only when the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07006173 if (ppd->host_link_state & HLS_DOWN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006174 dd_dev_info(dd, "%s: link state %s not up\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006175 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006176 ret = -EBUSY;
6177 goto done;
6178 }
6179
6180 if (dd->lcb_access_count == 0) {
6181 ret = request_host_lcb_access(dd);
6182 if (ret) {
6183 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006184 "%s: unable to acquire LCB access, err %d\n",
6185 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006186 goto done;
6187 }
6188 set_host_lcb_access(dd);
6189 }
6190 dd->lcb_access_count++;
6191done:
6192 mutex_unlock(&ppd->hls_lock);
6193 return ret;
6194}
6195
6196/*
6197 * Release LCB access by decrementing the use count. If the count is moving
 6198 * from 1 to 0, inform the 8051 that it has control back.
6199 *
6200 * Returns:
6201 * 0 on success
6202 * -errno if unable to release access to the 8051
6203 */
6204int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6205{
6206 int ret = 0;
6207
6208 /*
6209 * Use the host link state lock because the acquire needed it.
6210 * Here, we only need to keep { selector change, count decrement }
6211 * as a unit.
6212 */
6213 if (sleep_ok) {
6214 mutex_lock(&dd->pport->hls_lock);
6215 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006216 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006217 udelay(1);
6218 }
6219
6220 if (dd->lcb_access_count == 0) {
6221 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006222 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006223 goto done;
6224 }
6225
6226 if (dd->lcb_access_count == 1) {
6227 set_8051_lcb_access(dd);
6228 ret = request_8051_lcb_access(dd);
6229 if (ret) {
6230 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006231 "%s: unable to release LCB access, err %d\n",
6232 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006233 /* restore host access if the grant didn't work */
6234 set_host_lcb_access(dd);
6235 goto done;
6236 }
6237 }
6238 dd->lcb_access_count--;
6239done:
6240 mutex_unlock(&dd->pport->hls_lock);
6241 return ret;
6242}
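/*
 * Illustrative (hypothetical) caller sketch for the acquire/release
 * pair above -- not an actual call site in this file.  A caller that
 * needs to touch LCB CSRs directly would bracket the access like this:
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		val = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Passing sleep_ok = 1 lets the routines take the hls_lock mutex and
 * sleep; callers in atomic context must pass 0.
 */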
6243
6244/*
6245 * Initialize LCB access variables and state. Called during driver load,
6246 * after most of the initialization is finished.
6247 *
6248 * The DC default is LCB access on for the host. The driver defaults to
6249 * leaving access to the 8051. Assign access now - this constrains the call
6250 * to this routine to be after all LCB set-up is done. In particular, after
6251 * hf1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6252 */
6253static void init_lcb_access(struct hfi1_devdata *dd)
6254{
6255 dd->lcb_access_count = 0;
6256}
6257
6258/*
6259 * Write a response back to a 8051 request.
6260 */
6261static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6262{
6263 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
Jubin John17fb4f22016-02-14 20:21:52 -08006264 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6265 (u64)return_code <<
6266 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6267 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006268}
6269
6270/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006271 * Handle host requests from the 8051.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006272 */
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006273static void handle_8051_request(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006274{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006275 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006276 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006277 u16 data = 0;
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006278 u8 type;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006279
6280 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6281 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6282 return; /* no request */
6283
6284 /* zero out COMPLETED so the response is seen */
6285 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6286
6287 /* extract request details */
6288 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6289 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6290 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6291 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6292
6293 switch (type) {
6294 case HREQ_LOAD_CONFIG:
6295 case HREQ_SAVE_CONFIG:
6296 case HREQ_READ_CONFIG:
6297 case HREQ_SET_TX_EQ_ABS:
6298 case HREQ_SET_TX_EQ_REL:
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006299 case HREQ_ENABLE:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006300 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006301 type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006302 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6303 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006304 case HREQ_CONFIG_DONE:
6305 hreq_response(dd, HREQ_SUCCESS, 0);
6306 break;
6307
6308 case HREQ_INTERFACE_TEST:
6309 hreq_response(dd, HREQ_SUCCESS, data);
6310 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006311 default:
6312 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6313 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6314 break;
6315 }
6316}
6317
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006318/*
 6319 * Set up the allocation unit value.
6320 */
6321void set_up_vau(struct hfi1_devdata *dd, u8 vau)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006322{
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006323 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6324
6325 /* do not modify other values in the register */
6326 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6327 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6328 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006329}
6330
6331/*
6332 * Set up initial VL15 credits of the remote. Assumes the rest of
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006333 * the CM credit registers are zero from a previous global or credit reset.
6334 * Shared limit for VL15 will always be 0.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006335 */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006336void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006337{
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006338 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6339
6340 /* set initial values for total and shared credit limit */
6341 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6342 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6343
6344 /*
6345 * Set total limit to be equal to VL15 credits.
6346 * Leave shared limit at 0.
6347 */
6348 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6349 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006350
Dennis Dalessandroeacc8302016-10-17 04:19:52 -07006351 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6352 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006353}
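/*
 * Typical sequence in this file: handle_verify_cap() zeroes the limit
 * with set_up_vl15(dd, 0) and caches the peer's value in
 * dd->vl15buf_cached; handle_link_up() then applies the cached value
 * once the link-up interrupt arrives (except for quick linkup and the
 * simulator, where handle_linkup_change() supplies it instead).
 */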
6354
6355/*
6356 * Zero all credit details from the previous connection and
6357 * reset the CM manager's internal counters.
6358 */
6359void reset_link_credits(struct hfi1_devdata *dd)
6360{
6361 int i;
6362
6363 /* remove all previous VL credit limits */
6364 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006365 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006366 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006367 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006368 /* reset the CM block */
6369 pio_send_control(dd, PSC_CM_RESET);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006370 /* reset cached value */
6371 dd->vl15buf_cached = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006372}
6373
6374/* convert a vCU to a CU */
6375static u32 vcu_to_cu(u8 vcu)
6376{
6377 return 1 << vcu;
6378}
6379
6380/* convert a CU to a vCU */
6381static u8 cu_to_vcu(u32 cu)
6382{
6383 return ilog2(cu);
6384}
6385
6386/* convert a vAU to an AU */
6387static u32 vau_to_au(u8 vau)
6388{
6389 return 8 * (1 << vau);
6390}
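/*
 * Worked examples of the encodings above (illustrative only):
 *   vcu_to_cu(2) == 4      (a vCU of 2 means 2^2 credit units)
 *   cu_to_vcu(4) == 2      (inverse of the above)
 *   vau_to_au(3) == 64     (an AU is 8 * 2^vAU)
 */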
6391
6392static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6393{
6394 ppd->sm_trap_qp = 0x0;
6395 ppd->sa_qp = 0x1;
6396}
6397
6398/*
6399 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6400 */
6401static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6402{
6403 u64 reg;
6404
6405 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6406 write_csr(dd, DC_LCB_CFG_RUN, 0);
6407 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6408 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
Jubin John17fb4f22016-02-14 20:21:52 -08006409 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006410 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6411 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6412 reg = read_csr(dd, DCC_CFG_RESET);
Jubin John17fb4f22016-02-14 20:21:52 -08006413 write_csr(dd, DCC_CFG_RESET, reg |
6414 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6415 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006416 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006417 if (!abort) {
6418 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6419 write_csr(dd, DCC_CFG_RESET, reg);
6420 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6421 }
6422}
6423
6424/*
6425 * This routine should be called after the link has been transitioned to
6426 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6427 * reset).
6428 *
6429 * The expectation is that the caller of this routine would have taken
6430 * care of properly transitioning the link into the correct state.
Tadeusz Struk22546b72017-04-28 10:40:02 -07006431 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6432 * before calling this function.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006433 */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006434static void _dc_shutdown(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006435{
Tadeusz Struk22546b72017-04-28 10:40:02 -07006436 lockdep_assert_held(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006437
Tadeusz Struk22546b72017-04-28 10:40:02 -07006438 if (dd->dc_shutdown)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006439 return;
Tadeusz Struk22546b72017-04-28 10:40:02 -07006440
Mike Marciniszyn77241052015-07-30 15:17:43 -04006441 dd->dc_shutdown = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006442 /* Shutdown the LCB */
6443 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006444 /*
 6445 * Going to OFFLINE would have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006446 * SerDes into reset already. Just need to shut down the 8051
Jubin John4d114fd2016-02-14 20:21:43 -08006447 * itself.
6448 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006449 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6450}
6451
Tadeusz Struk22546b72017-04-28 10:40:02 -07006452static void dc_shutdown(struct hfi1_devdata *dd)
6453{
6454 mutex_lock(&dd->dc8051_lock);
6455 _dc_shutdown(dd);
6456 mutex_unlock(&dd->dc8051_lock);
6457}
6458
Jubin John4d114fd2016-02-14 20:21:43 -08006459/*
6460 * Calling this after the DC has been brought out of reset should not
6461 * do any damage.
Tadeusz Struk22546b72017-04-28 10:40:02 -07006462 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6463 * before calling this function.
Jubin John4d114fd2016-02-14 20:21:43 -08006464 */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006465static void _dc_start(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006466{
Tadeusz Struk22546b72017-04-28 10:40:02 -07006467 lockdep_assert_held(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006468
Mike Marciniszyn77241052015-07-30 15:17:43 -04006469 if (!dd->dc_shutdown)
Tadeusz Struk22546b72017-04-28 10:40:02 -07006470 return;
6471
Mike Marciniszyn77241052015-07-30 15:17:43 -04006472 /* Take the 8051 out of reset */
6473 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6474 /* Wait until 8051 is ready */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006475 if (wait_fm_ready(dd, TIMEOUT_8051_START))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006476 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006477 __func__);
Tadeusz Struk22546b72017-04-28 10:40:02 -07006478
Mike Marciniszyn77241052015-07-30 15:17:43 -04006479 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6480 write_csr(dd, DCC_CFG_RESET, 0x10);
6481 /* lcb_shutdown() with abort=1 does not restore these */
6482 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006483 dd->dc_shutdown = 0;
Tadeusz Struk22546b72017-04-28 10:40:02 -07006484}
6485
6486static void dc_start(struct hfi1_devdata *dd)
6487{
6488 mutex_lock(&dd->dc8051_lock);
6489 _dc_start(dd);
6490 mutex_unlock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006491}
6492
6493/*
6494 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6495 */
6496static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6497{
6498 u64 rx_radr, tx_radr;
6499 u32 version;
6500
6501 if (dd->icode != ICODE_FPGA_EMULATION)
6502 return;
6503
6504 /*
6505 * These LCB defaults on emulator _s are good, nothing to do here:
6506 * LCB_CFG_TX_FIFOS_RADR
6507 * LCB_CFG_RX_FIFOS_RADR
6508 * LCB_CFG_LN_DCLK
6509 * LCB_CFG_IGNORE_LOST_RCLK
6510 */
6511 if (is_emulator_s(dd))
6512 return;
6513 /* else this is _p */
6514
6515 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006516 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006517 version = 0x2d; /* all B0 use 0x2d or higher settings */
6518
6519 if (version <= 0x12) {
6520 /* release 0x12 and below */
6521
6522 /*
6523 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6524 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6525 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6526 */
6527 rx_radr =
6528 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6529 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6530 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6531 /*
6532 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6533 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6534 */
6535 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6536 } else if (version <= 0x18) {
6537 /* release 0x13 up to 0x18 */
6538 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6539 rx_radr =
6540 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6541 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6542 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6543 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6544 } else if (version == 0x19) {
6545 /* release 0x19 */
6546 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6547 rx_radr =
6548 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6549 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6550 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6551 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6552 } else if (version == 0x1a) {
6553 /* release 0x1a */
6554 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6555 rx_radr =
6556 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6557 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6558 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6559 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6560 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6561 } else {
6562 /* release 0x1b and higher */
6563 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6564 rx_radr =
6565 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6566 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6567 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6568 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6569 }
6570
6571 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6572 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6573 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
Jubin John17fb4f22016-02-14 20:21:52 -08006574 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006575 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6576}
6577
6578/*
6579 * Handle a SMA idle message
6580 *
6581 * This is a work-queue function outside of the interrupt.
6582 */
6583void handle_sma_message(struct work_struct *work)
6584{
6585 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6586 sma_message_work);
6587 struct hfi1_devdata *dd = ppd->dd;
6588 u64 msg;
6589 int ret;
6590
Jubin John4d114fd2016-02-14 20:21:43 -08006591 /*
6592 * msg is bytes 1-4 of the 40-bit idle message - the command code
6593 * is stripped off
6594 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006595 ret = read_idle_sma(dd, &msg);
6596 if (ret)
6597 return;
6598 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6599 /*
6600 * React to the SMA message. Byte[1] (0 for us) is the command.
6601 */
6602 switch (msg & 0xff) {
6603 case SMA_IDLE_ARM:
6604 /*
6605 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6606 * State Transitions
6607 *
6608 * Only expected in INIT or ARMED, discard otherwise.
6609 */
6610 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6611 ppd->neighbor_normal = 1;
6612 break;
6613 case SMA_IDLE_ACTIVE:
6614 /*
6615 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6616 * State Transitions
6617 *
6618 * Can activate the node. Discard otherwise.
6619 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006620 if (ppd->host_link_state == HLS_UP_ARMED &&
6621 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006622 ppd->neighbor_normal = 1;
6623 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6624 if (ret)
6625 dd_dev_err(
6626 dd,
6627 "%s: received Active SMA idle message, couldn't set link to Active\n",
6628 __func__);
6629 }
6630 break;
6631 default:
6632 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006633 "%s: received unexpected SMA idle message 0x%llx\n",
6634 __func__, msg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006635 break;
6636 }
6637}
6638
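/*
 * Locked read-modify-write of RCV_CTRL: set the bits in @add, then
 * clear the bits in @clear, under rcvctrl_lock so concurrent updates
 * do not lose each other's changes.
 */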
6639static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6640{
6641 u64 rcvctrl;
6642 unsigned long flags;
6643
6644 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6645 rcvctrl = read_csr(dd, RCV_CTRL);
6646 rcvctrl |= add;
6647 rcvctrl &= ~clear;
6648 write_csr(dd, RCV_CTRL, rcvctrl);
6649 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6650}
6651
6652static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6653{
6654 adjust_rcvctrl(dd, add, 0);
6655}
6656
6657static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6658{
6659 adjust_rcvctrl(dd, 0, clear);
6660}
6661
6662/*
6663 * Called from all interrupt handlers to start handling an SPC freeze.
6664 */
6665void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6666{
6667 struct hfi1_devdata *dd = ppd->dd;
6668 struct send_context *sc;
6669 int i;
6670
6671 if (flags & FREEZE_SELF)
6672 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6673
6674 /* enter frozen mode */
6675 dd->flags |= HFI1_FROZEN;
6676
6677 /* notify all SDMA engines that they are going into a freeze */
6678 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6679
6680 /* do halt pre-handling on all enabled send contexts */
6681 for (i = 0; i < dd->num_send_contexts; i++) {
6682 sc = dd->send_contexts[i].sc;
6683 if (sc && (sc->flags & SCF_ENABLED))
6684 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6685 }
6686
 6687 /* Send contexts are frozen. Notify user space */
6688 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6689
6690 if (flags & FREEZE_ABORT) {
6691 dd_dev_err(dd,
6692 "Aborted freeze recovery. Please REBOOT system\n");
6693 return;
6694 }
6695 /* queue non-interrupt handler */
6696 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6697}
6698
6699/*
6700 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6701 * depending on the "freeze" parameter.
6702 *
6703 * No need to return an error if it times out, our only option
6704 * is to proceed anyway.
6705 */
6706static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6707{
6708 unsigned long timeout;
6709 u64 reg;
6710
6711 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6712 while (1) {
6713 reg = read_csr(dd, CCE_STATUS);
6714 if (freeze) {
6715 /* waiting until all indicators are set */
6716 if ((reg & ALL_FROZE) == ALL_FROZE)
6717 return; /* all done */
6718 } else {
6719 /* waiting until all indicators are clear */
6720 if ((reg & ALL_FROZE) == 0)
6721 return; /* all done */
6722 }
6723
6724 if (time_after(jiffies, timeout)) {
6725 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006726 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6727 freeze ? "" : "un", reg & ALL_FROZE,
6728 freeze ? ALL_FROZE : 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006729 return;
6730 }
6731 usleep_range(80, 120);
6732 }
6733}
6734
6735/*
6736 * Do all freeze handling for the RXE block.
6737 */
6738static void rxe_freeze(struct hfi1_devdata *dd)
6739{
6740 int i;
6741
6742 /* disable port */
6743 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6744
6745 /* disable all receive contexts */
6746 for (i = 0; i < dd->num_rcv_contexts; i++)
6747 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6748}
6749
6750/*
6751 * Unfreeze handling for the RXE block - kernel contexts only.
6752 * This will also enable the port. User contexts will do unfreeze
6753 * handling on a per-context basis as they call into the driver.
6754 *
6755 */
6756static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6757{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006758 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006759 int i;
6760
6761 /* enable all kernel contexts */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07006762 for (i = 0; i < dd->num_rcv_contexts; i++) {
6763 struct hfi1_ctxtdata *rcd = dd->rcd[i];
6764
 6765 /* Ensure all non-user contexts (including vnic) are enabled */
6766 if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER))
6767 continue;
6768
Mitko Haralanov566c1572016-02-03 14:32:49 -08006769 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6770 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6771 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6772 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6773 hfi1_rcvctrl(dd, rcvmask, i);
6774 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006775
6776 /* enable port */
6777 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6778}
6779
6780/*
6781 * Non-interrupt SPC freeze handling.
6782 *
6783 * This is a work-queue function outside of the triggering interrupt.
6784 */
6785void handle_freeze(struct work_struct *work)
6786{
6787 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6788 freeze_work);
6789 struct hfi1_devdata *dd = ppd->dd;
6790
6791 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006792 wait_for_freeze_status(dd, 1);
6793
6794 /* SPC is now frozen */
6795
6796 /* do send PIO freeze steps */
6797 pio_freeze(dd);
6798
6799 /* do send DMA freeze steps */
6800 sdma_freeze(dd);
6801
6802 /* do send egress freeze steps - nothing to do */
6803
6804 /* do receive freeze steps */
6805 rxe_freeze(dd);
6806
6807 /*
6808 * Unfreeze the hardware - clear the freeze, wait for each
6809 * block's frozen bit to clear, then clear the frozen flag.
6810 */
6811 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6812 wait_for_freeze_status(dd, 0);
6813
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006814 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006815 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6816 wait_for_freeze_status(dd, 1);
6817 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6818 wait_for_freeze_status(dd, 0);
6819 }
6820
6821 /* do send PIO unfreeze steps for kernel contexts */
6822 pio_kernel_unfreeze(dd);
6823
6824 /* do send DMA unfreeze steps */
6825 sdma_unfreeze(dd);
6826
6827 /* do send egress unfreeze steps - nothing to do */
6828
6829 /* do receive unfreeze steps for kernel contexts */
6830 rxe_kernel_unfreeze(dd);
6831
6832 /*
6833 * The unfreeze procedure touches global device registers when
6834 * it disables and re-enables RXE. Mark the device unfrozen
6835 * after all that is done so other parts of the driver waiting
6836 * for the device to unfreeze don't do things out of order.
6837 *
6838 * The above implies that the meaning of HFI1_FROZEN flag is
6839 * "Device has gone into freeze mode and freeze mode handling
6840 * is still in progress."
6841 *
6842 * The flag will be removed when freeze mode processing has
6843 * completed.
6844 */
6845 dd->flags &= ~HFI1_FROZEN;
6846 wake_up(&dd->event_queue);
6847
6848 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006849}
6850
6851/*
6852 * Handle a link up interrupt from the 8051.
6853 *
6854 * This is a work-queue function outside of the interrupt.
6855 */
6856void handle_link_up(struct work_struct *work)
6857{
6858 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Jubin John17fb4f22016-02-14 20:21:52 -08006859 link_up_work);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006860 struct hfi1_devdata *dd = ppd->dd;
6861
Mike Marciniszyn77241052015-07-30 15:17:43 -04006862 set_link_state(ppd, HLS_UP_INIT);
6863
6864 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006865 read_ltp_rtt(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006866 /*
6867 * OPA specifies that certain counters are cleared on a transition
6868 * to link up, so do that.
6869 */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006870 clear_linkup_counters(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006871 /*
6872 * And (re)set link up default values.
6873 */
6874 set_linkup_defaults(ppd);
6875
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006876 /*
6877 * Set VL15 credits. Use cached value from verify cap interrupt.
6878 * In case of quick linkup or simulator, vl15 value will be set by
6879 * handle_linkup_change. VerifyCap interrupt handler will not be
6880 * called in those scenarios.
6881 */
6882 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6883 set_up_vl15(dd, dd->vl15buf_cached);
6884
Mike Marciniszyn77241052015-07-30 15:17:43 -04006885 /* enforce link speed enabled */
6886 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6887 /* oops - current speed is not enabled, bounce */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006888 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006889 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6890 ppd->link_speed_active, ppd->link_speed_enabled);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006891 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08006892 OPA_LINKDOWN_REASON_SPEED_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006893 set_link_state(ppd, HLS_DN_OFFLINE);
6894 start_link(ppd);
6895 }
6896}
6897
Jubin John4d114fd2016-02-14 20:21:43 -08006898/*
6899 * Several pieces of LNI information were cached for SMA in ppd.
6900 * Reset these on link down
6901 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006902static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6903{
6904 ppd->neighbor_guid = 0;
6905 ppd->neighbor_port_number = 0;
6906 ppd->neighbor_type = 0;
6907 ppd->neighbor_fm_security = 0;
6908}
6909
Dean Luickfeb831d2016-04-14 08:31:36 -07006910static const char * const link_down_reason_strs[] = {
6911 [OPA_LINKDOWN_REASON_NONE] = "None",
Dennis Dalessandro67838e62017-05-29 17:18:46 -07006912 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
Dean Luickfeb831d2016-04-14 08:31:36 -07006913 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6914 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6915 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6916 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6917 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6918 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6919 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6920 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6921 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6922 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6923 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6924 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6925 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6926 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6927 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6928 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6929 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6930 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6931 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6932 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6933 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6934 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6935 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6936 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6937 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6938 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6939 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6940 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6941 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6942 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6943 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6944 "Excessive buffer overrun",
6945 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6946 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6947 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6948 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6949 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6950 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6951 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6952 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6953 "Local media not installed",
6954 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6955 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6956 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6957 "End to end not installed",
6958 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6959 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6960 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6961 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6962 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6963 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6964};
6965
6966/* return the neighbor link down reason string */
6967static const char *link_down_reason_str(u8 reason)
6968{
6969 const char *str = NULL;
6970
6971 if (reason < ARRAY_SIZE(link_down_reason_strs))
6972 str = link_down_reason_strs[reason];
6973 if (!str)
6974 str = "(invalid)";
6975
6976 return str;
6977}
6978
Mike Marciniszyn77241052015-07-30 15:17:43 -04006979/*
6980 * Handle a link down interrupt from the 8051.
6981 *
6982 * This is a work-queue function outside of the interrupt.
6983 */
6984void handle_link_down(struct work_struct *work)
6985{
6986 u8 lcl_reason, neigh_reason = 0;
Dean Luickfeb831d2016-04-14 08:31:36 -07006987 u8 link_down_reason;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006988 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Dean Luickfeb831d2016-04-14 08:31:36 -07006989 link_down_work);
6990 int was_up;
6991 static const char ldr_str[] = "Link down reason: ";
Mike Marciniszyn77241052015-07-30 15:17:43 -04006992
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006993 if ((ppd->host_link_state &
6994 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6995 ppd->port_type == PORT_TYPE_FIXED)
6996 ppd->offline_disabled_reason =
6997 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6998
6999 /* Go offline first, then deal with reading/writing through 8051 */
Dean Luickfeb831d2016-04-14 08:31:36 -07007000 was_up = !!(ppd->host_link_state & HLS_UP);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007001 set_link_state(ppd, HLS_DN_OFFLINE);
7002
Dean Luickfeb831d2016-04-14 08:31:36 -07007003 if (was_up) {
7004 lcl_reason = 0;
7005 /* link down reason is only valid if the link was up */
7006 read_link_down_reason(ppd->dd, &link_down_reason);
7007 switch (link_down_reason) {
7008 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7009 /* the link went down, no idle message reason */
7010 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7011 ldr_str);
7012 break;
7013 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7014 /*
7015 * The neighbor reason is only valid if an idle message
7016 * was received for it.
7017 */
7018 read_planned_down_reason_code(ppd->dd, &neigh_reason);
7019 dd_dev_info(ppd->dd,
7020 "%sNeighbor link down message %d, %s\n",
7021 ldr_str, neigh_reason,
7022 link_down_reason_str(neigh_reason));
7023 break;
7024 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7025 dd_dev_info(ppd->dd,
7026 "%sHost requested link to go offline\n",
7027 ldr_str);
7028 break;
7029 default:
7030 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7031 ldr_str, link_down_reason);
7032 break;
7033 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007034
Dean Luickfeb831d2016-04-14 08:31:36 -07007035 /*
7036 * If no reason, assume peer-initiated but missed
7037 * LinkGoingDown idle flits.
7038 */
7039 if (neigh_reason == 0)
7040 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7041 } else {
7042 /* went down while polling or going up */
7043 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7044 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007045
7046 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7047
Dean Luick015e91f2016-04-14 08:31:42 -07007048 /* inform the SMA when the link transitions from up to down */
7049 if (was_up && ppd->local_link_down_reason.sma == 0 &&
7050 ppd->neigh_link_down_reason.sma == 0) {
7051 ppd->local_link_down_reason.sma =
7052 ppd->local_link_down_reason.latest;
7053 ppd->neigh_link_down_reason.sma =
7054 ppd->neigh_link_down_reason.latest;
7055 }
7056
Mike Marciniszyn77241052015-07-30 15:17:43 -04007057 reset_neighbor_info(ppd);
7058
7059 /* disable the port */
7060 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7061
Jubin John4d114fd2016-02-14 20:21:43 -08007062 /*
7063 * If there is no cable attached, turn the DC off. Otherwise,
7064 * start the link bring up.
7065 */
Dean Luick0db9dec2016-09-06 04:35:20 -07007066 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04007067 dc_shutdown(ppd->dd);
Dean Luick0db9dec2016-09-06 04:35:20 -07007068 else
Mike Marciniszyn77241052015-07-30 15:17:43 -04007069 start_link(ppd);
7070}
7071
7072void handle_link_bounce(struct work_struct *work)
7073{
7074 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7075 link_bounce_work);
7076
7077 /*
7078 * Only do something if the link is currently up.
7079 */
7080 if (ppd->host_link_state & HLS_UP) {
7081 set_link_state(ppd, HLS_DN_OFFLINE);
7082 start_link(ppd);
7083 } else {
7084 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007085 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007086 }
7087}
7088
7089/*
7090 * Mask conversion: Capability exchange to Port LTP. The capability
7091 * exchange has an implicit 16b CRC that is mandatory.
7092 */
7093static int cap_to_port_ltp(int cap)
7094{
7095 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7096
7097 if (cap & CAP_CRC_14B)
7098 port_ltp |= PORT_LTP_CRC_MODE_14;
7099 if (cap & CAP_CRC_48B)
7100 port_ltp |= PORT_LTP_CRC_MODE_48;
7101 if (cap & CAP_CRC_12B_16B_PER_LANE)
7102 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7103
7104 return port_ltp;
7105}
7106
7107/*
7108 * Convert an OPA Port LTP mask to capability mask
7109 */
7110int port_ltp_to_cap(int port_ltp)
7111{
7112 int cap_mask = 0;
7113
7114 if (port_ltp & PORT_LTP_CRC_MODE_14)
7115 cap_mask |= CAP_CRC_14B;
7116 if (port_ltp & PORT_LTP_CRC_MODE_48)
7117 cap_mask |= CAP_CRC_48B;
7118 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7119 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7120
7121 return cap_mask;
7122}
7123
7124/*
7125 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7126 */
7127static int lcb_to_port_ltp(int lcb_crc)
7128{
7129 int port_ltp = 0;
7130
7131 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7132 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7133 else if (lcb_crc == LCB_CRC_48B)
7134 port_ltp = PORT_LTP_CRC_MODE_48;
7135 else if (lcb_crc == LCB_CRC_14B)
7136 port_ltp = PORT_LTP_CRC_MODE_14;
7137 else
7138 port_ltp = PORT_LTP_CRC_MODE_16;
7139
7140 return port_ltp;
7141}
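/*
 * Illustrative conversions using the helpers above: a capability mask
 * of CAP_CRC_14B maps to PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14
 * via cap_to_port_ltp() (the 16b mode is always included), while the
 * single LCB mode LCB_CRC_14B maps back to just PORT_LTP_CRC_MODE_14
 * via lcb_to_port_ltp().
 */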
7142
7143/*
7144 * Our neighbor has indicated that we are allowed to act as a fabric
7145 * manager, so place the full management partition key in the second
7146 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7147 * that we should already have the limited management partition key in
7148 * array element 1, and also that the port is not yet up when
7149 * add_full_mgmt_pkey() is invoked.
7150 */
7151static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7152{
7153 struct hfi1_devdata *dd = ppd->dd;
7154
Dennis Dalessandroa498fbc2017-04-09 10:17:06 -07007155 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
Dean Luick87645222015-12-01 15:38:21 -05007156 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7157 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7158 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007159 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7160 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007161 hfi1_event_pkey_change(ppd->dd, ppd->port);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007162}
7163
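/* Undo add_full_mgmt_pkey(): clear the full management pkey, if set. */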
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007164static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007165{
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007166 if (ppd->pkeys[2] != 0) {
7167 ppd->pkeys[2] = 0;
7168 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007169 hfi1_event_pkey_change(ppd->dd, ppd->port);
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007170 }
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007171}
7172
Mike Marciniszyn77241052015-07-30 15:17:43 -04007173/*
7174 * Convert the given link width to the OPA link width bitmask.
7175 */
7176static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7177{
7178 switch (width) {
7179 case 0:
7180 /*
7181 * Simulator and quick linkup do not set the width.
7182 * Just set it to 4x without complaint.
7183 */
7184 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7185 return OPA_LINK_WIDTH_4X;
7186 return 0; /* no lanes up */
7187 case 1: return OPA_LINK_WIDTH_1X;
7188 case 2: return OPA_LINK_WIDTH_2X;
7189 case 3: return OPA_LINK_WIDTH_3X;
7190 default:
7191 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007192 __func__, width);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007193 /* fall through */
7194 case 4: return OPA_LINK_WIDTH_4X;
7195 }
7196}
7197
7198/*
7199 * Do a population count on the bottom nibble.
7200 */
7201static const u8 bit_counts[16] = {
7202 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7203};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007204
Mike Marciniszyn77241052015-07-30 15:17:43 -04007205static inline u8 nibble_to_count(u8 nibble)
7206{
7207 return bit_counts[nibble & 0xf];
7208}
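/*
 * For example, nibble_to_count(0xb) == 3: an enable mask of binary
 * 1011 means three lanes are active.
 */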
7209
7210/*
7211 * Read the active lane information from the 8051 registers and return
7212 * their widths.
7213 *
7214 * Active lane information is found in these 8051 registers:
7215 * enable_lane_tx
7216 * enable_lane_rx
7217 */
7218static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7219 u16 *rx_width)
7220{
7221 u16 tx, rx;
7222 u8 enable_lane_rx;
7223 u8 enable_lane_tx;
7224 u8 tx_polarity_inversion;
7225 u8 rx_polarity_inversion;
7226 u8 max_rate;
7227
7228 /* read the active lanes */
7229 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08007230 &rx_polarity_inversion, &max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007231 read_local_lni(dd, &enable_lane_rx);
7232
7233 /* convert to counts */
7234 tx = nibble_to_count(enable_lane_tx);
7235 rx = nibble_to_count(enable_lane_rx);
7236
7237 /*
7238 * Set link_speed_active here, overriding what was set in
7239 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7240 * set the max_rate field in handle_verify_cap until v0.19.
7241 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007242 if ((dd->icode == ICODE_RTL_SILICON) &&
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07007243 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007244 /* max_rate: 0 = 12.5G, 1 = 25G */
7245 switch (max_rate) {
7246 case 0:
7247 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7248 break;
7249 default:
7250 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007251 "%s: unexpected max rate %d, using 25Gb\n",
7252 __func__, (int)max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007253 /* fall through */
7254 case 1:
7255 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7256 break;
7257 }
7258 }
7259
7260 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007261 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7262 enable_lane_tx, tx, enable_lane_rx, rx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007263 *tx_width = link_width_to_bits(dd, tx);
7264 *rx_width = link_width_to_bits(dd, rx);
7265}
7266
7267/*
7268 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7269 * Valid after the end of VerifyCap and during LinkUp. Does not change
7270 * after link up. I.e. look elsewhere for downgrade information.
7271 *
7272 * Bits are:
7273 * + bits [7:4] contain the number of active transmitters
7274 * + bits [3:0] contain the number of active receivers
7275 * These are numbers 1 through 4 and can be different values if the
7276 * link is asymmetric.
7277 *
7278 * verify_cap_local_fm_link_width[0] retains its original value.
7279 */
7280static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7281 u16 *rx_width)
7282{
7283 u16 widths, tx, rx;
7284 u8 misc_bits, local_flags;
7285 u16 active_tx, active_rx;
7286
7287 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7288 tx = widths >> 12;
7289 rx = (widths >> 8) & 0xf;
7290
7291 *tx_width = link_width_to_bits(dd, tx);
7292 *rx_width = link_width_to_bits(dd, rx);
7293
7294 /* print the active widths */
7295 get_link_widths(dd, &active_tx, &active_rx);
7296}
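/*
 * Illustrative example for get_linkup_widths(): a widths value of
 * 0x4400 decodes to 4 active transmitters and 4 active receivers,
 * which link_width_to_bits() maps to OPA_LINK_WIDTH_4X in each
 * direction.
 */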
7297
7298/*
7299 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7300 * hardware information when the link first comes up.
7301 *
7302 * The link width is not available until after VerifyCap.AllFramesReceived
7303 * (the trigger for handle_verify_cap), so this is outside that routine
7304 * and should be called when the 8051 signals linkup.
7305 */
7306void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7307{
7308 u16 tx_width, rx_width;
7309
7310 /* get end-of-LNI link widths */
7311 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7312
7313 /* use tx_width as the link is supposed to be symmetric on link up */
7314 ppd->link_width_active = tx_width;
7315 /* link width downgrade active (LWD.A) starts out matching LW.A */
7316 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7317 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7318 /* per OPA spec, on link up LWD.E resets to LWD.S */
7319 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
 7320 /* cache the active egress rate (units [10^6 bits/sec]) */
7321 ppd->current_egress_rate = active_egress_rate(ppd);
7322}
7323
7324/*
7325 * Handle a verify capabilities interrupt from the 8051.
7326 *
7327 * This is a work-queue function outside of the interrupt.
7328 */
7329void handle_verify_cap(struct work_struct *work)
7330{
7331 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7332 link_vc_work);
7333 struct hfi1_devdata *dd = ppd->dd;
7334 u64 reg;
7335 u8 power_management;
 7336 u8 continuous;
7337 u8 vcu;
7338 u8 vau;
7339 u8 z;
7340 u16 vl15buf;
7341 u16 link_widths;
7342 u16 crc_mask;
7343 u16 crc_val;
7344 u16 device_id;
7345 u16 active_tx, active_rx;
7346 u8 partner_supported_crc;
7347 u8 remote_tx_rate;
7348 u8 device_rev;
7349
7350 set_link_state(ppd, HLS_VERIFY_CAP);
7351
7352 lcb_shutdown(dd, 0);
7353 adjust_lcb_for_fpga_serdes(dd);
7354
Mike Marciniszyn77241052015-07-30 15:17:43 -04007355 read_vc_remote_phy(dd, &power_management, &continuous);
Jubin John17fb4f22016-02-14 20:21:52 -08007356 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7357 &partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007358 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7359 read_remote_device_id(dd, &device_id, &device_rev);
7360 /*
7361 * And the 'MgmtAllowed' information, which is exchanged during
 7362 * LNI, is also available at this point.
7363 */
7364 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7365 /* print the active widths */
7366 get_link_widths(dd, &active_tx, &active_rx);
7367 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007368 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7369 (int)power_management, (int)continious);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007370 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007371 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7372 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7373 (int)partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007374 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007375 (u32)remote_tx_rate, (u32)link_widths);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007376 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007377 (u32)device_id, (u32)device_rev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007378 /*
7379 * The peer vAU value just read is the peer receiver value. HFI does
7380 * not support a transmit vAU of 0 (AU == 8). We advertised that
7381 * with Z=1 in the fabric capabilities sent to the peer. The peer
7382 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7383 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7384 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7385 * subject to the Z value exception.
7386 */
7387 if (vau == 0)
7388 vau = 1;
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07007389 set_up_vau(dd, vau);
7390
7391 /*
 7392 * Set VL15 credits to 0 in the global credit register. Cache the remote
 7393 * VL15 credits value and wait for the link-up interrupt to set it.
7394 */
7395 set_up_vl15(dd, 0);
7396 dd->vl15buf_cached = vl15buf;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007397
7398 /* set up the LCB CRC mode */
7399 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7400
7401 /* order is important: use the lowest bit in common */
7402 if (crc_mask & CAP_CRC_14B)
7403 crc_val = LCB_CRC_14B;
7404 else if (crc_mask & CAP_CRC_48B)
7405 crc_val = LCB_CRC_48B;
7406 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7407 crc_val = LCB_CRC_12B_16B_PER_LANE;
7408 else
7409 crc_val = LCB_CRC_16B;
7410
7411 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7412 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7413 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7414
7415 /* set (14b only) or clear sideband credit */
7416 reg = read_csr(dd, SEND_CM_CTRL);
7417 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7418 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007419 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007420 } else {
7421 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007422 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007423 }
7424
7425 ppd->link_speed_active = 0; /* invalid value */
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07007426 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007427 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7428 switch (remote_tx_rate) {
7429 case 0:
7430 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7431 break;
7432 case 1:
7433 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7434 break;
7435 }
7436 } else {
7437 /* actual rate is highest bit of the ANDed rates */
7438 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7439
7440 if (rate & 2)
7441 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7442 else if (rate & 1)
7443 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7444 }
7445 if (ppd->link_speed_active == 0) {
7446 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007447 __func__, (int)remote_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007448 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7449 }
7450
7451 /*
7452 * Cache the values of the supported, enabled, and active
7453 * LTP CRC modes to return in 'portinfo' queries. But the bit
7454 * flags that are returned in the portinfo query differ from
7455 * what's in the link_crc_mask, crc_sizes, and crc_val
7456 * variables. Convert these here.
7457 */
7458 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7459 /* supported crc modes */
7460 ppd->port_ltp_crc_mode |=
7461 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7462 /* enabled crc modes */
7463 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7464 /* active crc mode */
7465
7466 /* set up the remote credit return table */
7467 assign_remote_cm_au_table(dd, vcu);
7468
7469 /*
7470 * The LCB is reset on entry to handle_verify_cap(), so this must
7471 * be applied on every link up.
7472 *
7473 * Adjust LCB error kill enable to kill the link if
7474 * these RBUF errors are seen:
7475 * REPLAY_BUF_MBE_SMASK
7476 * FLIT_INPUT_BUF_MBE_SMASK
7477 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007478 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007479 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7480 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7481 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7482 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7483 }
7484
7485 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7486 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7487
7488 /* give 8051 access to the LCB CSRs */
7489 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7490 set_8051_lcb_access(dd);
7491
Mike Marciniszyn77241052015-07-30 15:17:43 -04007492 if (ppd->mgmt_allowed)
7493 add_full_mgmt_pkey(ppd);
7494
7495 /* tell the 8051 to go to LinkUp */
7496 set_link_state(ppd, HLS_GOING_UP);
7497}
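
/*
 * Illustrative sketch, not called by the driver: the CRC negotiation in
 * handle_verify_cap() reduces to "AND our enabled capability mask with
 * the peer's supported mask, then take the lowest bit in common, falling
 * back to 16b, which is always supported".  Expressed as a standalone
 * helper (the name is hypothetical):
 */
static u16 __maybe_unused example_pick_lcb_crc(u16 local_enabled,
					       u8 peer_supported)
{
	u16 common = local_enabled & peer_supported;

	if (common & CAP_CRC_14B)
		return LCB_CRC_14B;
	if (common & CAP_CRC_48B)
		return LCB_CRC_48B;
	if (common & CAP_CRC_12B_16B_PER_LANE)
		return LCB_CRC_12B_16B_PER_LANE;
	return LCB_CRC_16B;	/* always-supported fallback */
}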
7498
7499/*
7500 * Apply the link width downgrade enabled policy against the current active
7501 * link widths.
7502 *
7503 * Called when the enabled policy changes or the active link widths change.
7504 */
7505void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7506{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007507 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007508 int tries;
7509 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007510 u16 tx, rx;
7511
Dean Luick323fd782015-11-16 21:59:24 -05007512 /* use the hls lock to avoid a race with actual link up */
7513 tries = 0;
7514retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007515 mutex_lock(&ppd->hls_lock);
7516 /* only apply if the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07007517 if (ppd->host_link_state & HLS_DOWN) {
Dean Luick323fd782015-11-16 21:59:24 -05007518		/* still going up... wait and retry */
7519 if (ppd->host_link_state & HLS_GOING_UP) {
7520 if (++tries < 1000) {
7521 mutex_unlock(&ppd->hls_lock);
7522 usleep_range(100, 120); /* arbitrary */
7523 goto retry;
7524 }
7525 dd_dev_err(ppd->dd,
7526 "%s: giving up waiting for link state change\n",
7527 __func__);
7528 }
7529 goto done;
7530 }
7531
7532 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007533
7534 if (refresh_widths) {
7535 get_link_widths(ppd->dd, &tx, &rx);
7536 ppd->link_width_downgrade_tx_active = tx;
7537 ppd->link_width_downgrade_rx_active = rx;
7538 }
7539
Dean Luickf9b56352016-04-14 08:31:30 -07007540 if (ppd->link_width_downgrade_tx_active == 0 ||
7541 ppd->link_width_downgrade_rx_active == 0) {
7542 /* the 8051 reported a dead link as a downgrade */
7543 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7544 } else if (lwde == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007545 /* downgrade is disabled */
7546
7547 /* bounce if not at starting active width */
7548 if ((ppd->link_width_active !=
Jubin John17fb4f22016-02-14 20:21:52 -08007549 ppd->link_width_downgrade_tx_active) ||
7550 (ppd->link_width_active !=
7551 ppd->link_width_downgrade_rx_active)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007552 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007553 "Link downgrade is disabled and link has downgraded, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007554 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007555 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7556 ppd->link_width_active,
7557 ppd->link_width_downgrade_tx_active,
7558 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007559 do_bounce = 1;
7560 }
Jubin Johnd0d236e2016-02-14 20:20:15 -08007561 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7562 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007563 /* Tx or Rx is outside the enabled policy */
7564 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007565 "Link is outside of downgrade allowed, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007566 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007567 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7568 lwde, ppd->link_width_downgrade_tx_active,
7569 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007570 do_bounce = 1;
7571 }
7572
Dean Luick323fd782015-11-16 21:59:24 -05007573done:
7574 mutex_unlock(&ppd->hls_lock);
7575
Mike Marciniszyn77241052015-07-30 15:17:43 -04007576 if (do_bounce) {
7577 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08007578 OPA_LINKDOWN_REASON_WIDTH_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007579 set_link_state(ppd, HLS_DN_OFFLINE);
7580 start_link(ppd);
7581 }
7582}
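
/*
 * Illustrative sketch, not called by the driver: leaving aside the
 * dead-link special case, the bounce decision above has two arms -
 * downgrade disabled but the active width changed, or a downgraded
 * tx/rx width falling outside the enabled mask.  As a pure predicate
 * (hypothetical name):
 */
static int __maybe_unused example_width_policy_bounce(u16 lwde, u16 active,
						      u16 tx_active,
						      u16 rx_active)
{
	if (lwde == 0)	/* downgrade disabled: any change forces a bounce */
		return active != tx_active || active != rx_active;
	/* downgrade enabled: both directions must stay within the policy */
	return !(lwde & tx_active) || !(lwde & rx_active);
}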
7583
7584/*
7585 * Handle a link downgrade interrupt from the 8051.
7586 *
7587 * This is a work-queue function outside of the interrupt.
7588 */
7589void handle_link_downgrade(struct work_struct *work)
7590{
7591 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7592 link_downgrade_work);
7593
7594 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7595 apply_link_downgrade_policy(ppd, 1);
7596}
7597
7598static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7599{
7600 return flag_string(buf, buf_len, flags, dcc_err_flags,
7601 ARRAY_SIZE(dcc_err_flags));
7602}
7603
7604static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7605{
7606 return flag_string(buf, buf_len, flags, lcb_err_flags,
7607 ARRAY_SIZE(lcb_err_flags));
7608}
7609
7610static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7611{
7612 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7613 ARRAY_SIZE(dc8051_err_flags));
7614}
7615
7616static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7617{
7618 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7619 ARRAY_SIZE(dc8051_info_err_flags));
7620}
7621
7622static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7623{
7624 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7625 ARRAY_SIZE(dc8051_info_host_msg_flags));
7626}
7627
7628static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7629{
7630 struct hfi1_pportdata *ppd = dd->pport;
7631 u64 info, err, host_msg;
7632 int queue_link_down = 0;
7633 char buf[96];
7634
7635 /* look at the flags */
7636 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7637 /* 8051 information set by firmware */
7638 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7639 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7640 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7641 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7642 host_msg = (info >>
7643 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7644 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7645
7646 /*
7647 * Handle error flags.
7648 */
7649 if (err & FAILED_LNI) {
7650 /*
7651 * LNI error indications are cleared by the 8051
7652 * only when starting polling. Only pay attention
7653 * to them when in the states that occur during
7654 * LNI.
7655 */
7656 if (ppd->host_link_state
7657 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7658 queue_link_down = 1;
7659 dd_dev_info(dd, "Link error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007660 dc8051_info_err_string(buf,
7661 sizeof(buf),
7662 err &
7663 FAILED_LNI));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007664 }
7665 err &= ~(u64)FAILED_LNI;
7666 }
Dean Luick6d014532015-12-01 15:38:23 -05007667		/* unknown frames can happen during LNI, just count */
7668 if (err & UNKNOWN_FRAME) {
7669 ppd->unknown_frame_count++;
7670 err &= ~(u64)UNKNOWN_FRAME;
7671 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007672 if (err) {
7673 /* report remaining errors, but do not do anything */
7674 dd_dev_err(dd, "8051 info error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007675 dc8051_info_err_string(buf, sizeof(buf),
7676 err));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007677 }
7678
7679 /*
7680 * Handle host message flags.
7681 */
7682 if (host_msg & HOST_REQ_DONE) {
7683 /*
7684 * Presently, the driver does a busy wait for
7685 * host requests to complete. This is only an
7686 * informational message.
7687 * NOTE: The 8051 clears the host message
7688 * information *on the next 8051 command*.
7689 * Therefore, when linkup is achieved,
7690 * this flag will still be set.
7691 */
7692 host_msg &= ~(u64)HOST_REQ_DONE;
7693 }
7694 if (host_msg & BC_SMA_MSG) {
7695 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7696 host_msg &= ~(u64)BC_SMA_MSG;
7697 }
7698 if (host_msg & LINKUP_ACHIEVED) {
7699 dd_dev_info(dd, "8051: Link up\n");
7700 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7701 host_msg &= ~(u64)LINKUP_ACHIEVED;
7702 }
7703 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07007704 handle_8051_request(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007705 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7706 }
7707 if (host_msg & VERIFY_CAP_FRAME) {
7708 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7709 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7710 }
7711 if (host_msg & LINK_GOING_DOWN) {
7712 const char *extra = "";
7713 /* no downgrade action needed if going down */
7714 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7715 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7716 extra = " (ignoring downgrade)";
7717 }
7718 dd_dev_info(dd, "8051: Link down%s\n", extra);
7719 queue_link_down = 1;
7720 host_msg &= ~(u64)LINK_GOING_DOWN;
7721 }
7722 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7723 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7724 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7725 }
7726 if (host_msg) {
7727 /* report remaining messages, but do not do anything */
7728 dd_dev_info(dd, "8051 info host message: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007729 dc8051_info_host_msg_string(buf,
7730 sizeof(buf),
7731 host_msg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007732 }
7733
7734 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7735 }
7736 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7737 /*
7738 * Lost the 8051 heartbeat. If this happens, we
7739 * receive constant interrupts about it. Disable
7740 * the interrupt after the first.
7741 */
7742 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7743 write_csr(dd, DC_DC8051_ERR_EN,
Jubin John17fb4f22016-02-14 20:21:52 -08007744 read_csr(dd, DC_DC8051_ERR_EN) &
7745 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007746
7747 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7748 }
7749 if (reg) {
7750 /* report the error, but do not do anything */
7751 dd_dev_err(dd, "8051 error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007752 dc8051_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007753 }
7754
7755 if (queue_link_down) {
Jubin John4d114fd2016-02-14 20:21:43 -08007756 /*
7757 * if the link is already going down or disabled, do not
7758 * queue another
7759 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007760 if ((ppd->host_link_state &
7761 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7762 ppd->link_enabled == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007763 dd_dev_info(dd, "%s: not queuing link down\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007764 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007765 } else {
7766 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7767 }
7768 }
7769}
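
/*
 * Illustrative sketch, not called by the driver: handle_8051_interrupt()
 * follows a "consume what you recognize, report what is left" pattern -
 * each recognized flag is acted on and cleared from the local copy, and
 * whatever remains at the end is logged as unexpected.  The shape,
 * reduced to its essentials (hypothetical helper):
 */
static u64 __maybe_unused example_consume_flags(u64 flags, u64 handled_mask)
{
	/* act on flags & handled_mask here, then report the remainder */
	return flags & ~handled_mask;
}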
7770
7771static const char * const fm_config_txt[] = {
7772[0] =
7773 "BadHeadDist: Distance violation between two head flits",
7774[1] =
7775 "BadTailDist: Distance violation between two tail flits",
7776[2] =
7777 "BadCtrlDist: Distance violation between two credit control flits",
7778[3] =
7779 "BadCrdAck: Credits return for unsupported VL",
7780[4] =
7781 "UnsupportedVLMarker: Received VL Marker",
7782[5] =
7783 "BadPreempt: Exceeded the preemption nesting level",
7784[6] =
7785 "BadControlFlit: Received unsupported control flit",
7786/* no 7 */
7787[8] =
7788 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7789};
7790
7791static const char * const port_rcv_txt[] = {
7792[1] =
7793 "BadPktLen: Illegal PktLen",
7794[2] =
7795 "PktLenTooLong: Packet longer than PktLen",
7796[3] =
7797 "PktLenTooShort: Packet shorter than PktLen",
7798[4] =
7799 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7800[5] =
7801 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7802[6] =
7803 "BadL2: Illegal L2 opcode",
7804[7] =
7805 "BadSC: Unsupported SC",
7806[9] =
7807 "BadRC: Illegal RC",
7808[11] =
7809 "PreemptError: Preempting with same VL",
7810[12] =
7811 "PreemptVL15: Preempting a VL15 packet",
7812};
7813
7814#define OPA_LDR_FMCONFIG_OFFSET 16
7815#define OPA_LDR_PORTRCV_OFFSET 0
7816static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7817{
7818 u64 info, hdr0, hdr1;
7819 const char *extra;
7820 char buf[96];
7821 struct hfi1_pportdata *ppd = dd->pport;
7822 u8 lcl_reason = 0;
7823 int do_bounce = 0;
7824
7825 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7826 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7827 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7828 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7829 /* set status bit */
7830 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7831 }
7832 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7833 }
7834
7835 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7836 struct hfi1_pportdata *ppd = dd->pport;
7837 /* this counter saturates at (2^32) - 1 */
7838 if (ppd->link_downed < (u32)UINT_MAX)
7839 ppd->link_downed++;
7840 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7841 }
7842
7843 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7844 u8 reason_valid = 1;
7845
7846 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7847 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7848 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7849 /* set status bit */
7850 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7851 }
7852 switch (info) {
7853 case 0:
7854 case 1:
7855 case 2:
7856 case 3:
7857 case 4:
7858 case 5:
7859 case 6:
7860 extra = fm_config_txt[info];
7861 break;
7862 case 8:
7863 extra = fm_config_txt[info];
7864 if (ppd->port_error_action &
7865 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7866 do_bounce = 1;
7867 /*
7868 * lcl_reason cannot be derived from info
7869 * for this error
7870 */
7871 lcl_reason =
7872 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7873 }
7874 break;
7875 default:
7876 reason_valid = 0;
7877 snprintf(buf, sizeof(buf), "reserved%lld", info);
7878 extra = buf;
7879 break;
7880 }
7881
7882 if (reason_valid && !do_bounce) {
7883 do_bounce = ppd->port_error_action &
7884 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7885 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7886 }
7887
7888 /* just report this */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007889 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7890 extra);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007891 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7892 }
7893
7894 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7895 u8 reason_valid = 1;
7896
7897 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7898 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7899 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7900 if (!(dd->err_info_rcvport.status_and_code &
7901 OPA_EI_STATUS_SMASK)) {
7902 dd->err_info_rcvport.status_and_code =
7903 info & OPA_EI_CODE_SMASK;
7904 /* set status bit */
7905 dd->err_info_rcvport.status_and_code |=
7906 OPA_EI_STATUS_SMASK;
Jubin John4d114fd2016-02-14 20:21:43 -08007907 /*
7908 * save first 2 flits in the packet that caused
7909 * the error
7910 */
Bart Van Assche48a0cc132016-06-03 12:09:56 -07007911 dd->err_info_rcvport.packet_flit1 = hdr0;
7912 dd->err_info_rcvport.packet_flit2 = hdr1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007913 }
7914 switch (info) {
7915 case 1:
7916 case 2:
7917 case 3:
7918 case 4:
7919 case 5:
7920 case 6:
7921 case 7:
7922 case 9:
7923 case 11:
7924 case 12:
7925 extra = port_rcv_txt[info];
7926 break;
7927 default:
7928 reason_valid = 0;
7929 snprintf(buf, sizeof(buf), "reserved%lld", info);
7930 extra = buf;
7931 break;
7932 }
7933
7934 if (reason_valid && !do_bounce) {
7935 do_bounce = ppd->port_error_action &
7936 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7937 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7938 }
7939
7940 /* just report this */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007941 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
7942 " hdr0 0x%llx, hdr1 0x%llx\n",
7943 extra, hdr0, hdr1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007944
7945 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7946 }
7947
7948 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7949 /* informative only */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007950 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007951 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7952 }
7953 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7954 /* informative only */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007955 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007956 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7957 }
7958
Don Hiatt243d9f42017-03-20 17:26:20 -07007959 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
7960 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
7961
Mike Marciniszyn77241052015-07-30 15:17:43 -04007962 /* report any remaining errors */
7963 if (reg)
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007964 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
7965 dcc_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007966
7967 if (lcl_reason == 0)
7968 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7969
7970 if (do_bounce) {
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007971 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
7972 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007973 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7974 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7975 }
7976}
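
/*
 * Illustrative sketch, not called by the driver: the PortErrorAction
 * checks in handle_dcc_err() map a hardware error-info code to a bit in
 * the FM-programmed action mask by adding a per-source offset
 * (OPA_LDR_FMCONFIG_OFFSET for FMConfig errors, OPA_LDR_PORTRCV_OFFSET
 * for PortRcv errors).  As a standalone test (hypothetical name):
 */
static int __maybe_unused example_port_error_bounce(u32 port_error_action,
						    u32 offset, u32 info)
{
	return !!(port_error_action & (1 << (offset + info)));
}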
7977
7978static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7979{
7980 char buf[96];
7981
7982 dd_dev_info(dd, "LCB Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007983 lcb_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007984}
7985
7986/*
7987 * CCE block DC interrupt. Source is < 8.
7988 */
7989static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7990{
7991 const struct err_reg_info *eri = &dc_errs[source];
7992
7993 if (eri->handler) {
7994 interrupt_clear_down(dd, 0, eri);
7995 } else if (source == 3 /* dc_lbm_int */) {
7996 /*
7997 * This indicates that a parity error has occurred on the
7998 * address/control lines presented to the LBM. The error
7999 * is a single pulse, there is no associated error flag,
8000 * and it is non-maskable. This is because if a parity
8001 * error occurs on the request the request is dropped.
8002 * This should never occur, but it is nice to know if it
8003 * ever does.
8004 */
8005 dd_dev_err(dd, "Parity error in DC LBM block\n");
8006 } else {
8007 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8008 }
8009}
8010
8011/*
8012 * TX block send credit interrupt. Source is < 160.
8013 */
8014static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8015{
8016 sc_group_release_update(dd, source);
8017}
8018
8019/*
8020 * TX block SDMA interrupt. Source is < 48.
8021 *
8022 * SDMA interrupts are grouped by type:
8023 *
8024 * 0 - N-1 = SDma
8025 * N - 2N-1 = SDmaProgress
8026 * 2N - 3N-1 = SDmaIdle
8027 */
8028static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8029{
8030 /* what interrupt */
8031 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8032 /* which engine */
8033 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8034
8035#ifdef CONFIG_SDMA_VERBOSITY
8036 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8037 slashstrip(__FILE__), __LINE__, __func__);
8038 sdma_dumpstate(&dd->per_sdma[which]);
8039#endif
8040
8041 if (likely(what < 3 && which < dd->num_sdma)) {
8042 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8043 } else {
8044 /* should not happen */
8045 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8046 }
8047}
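
/*
 * Worked example of the decode above (illustrative only): with
 * TXE_NUM_SDMA_ENGINES == 16, the three groups cover sources 0-47, and
 * source 21 gives what = 21 / 16 = 1 (SDmaProgress) and
 * which = 21 % 16 = 5, i.e. the progress interrupt for engine 5.
 */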
8048
8049/*
8050 * RX block receive available interrupt. Source is < 160.
8051 */
8052static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8053{
8054 struct hfi1_ctxtdata *rcd;
8055 char *err_detail;
8056
8057 if (likely(source < dd->num_rcv_contexts)) {
8058 rcd = dd->rcd[source];
8059 if (rcd) {
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07008060 /* Check for non-user contexts, including vnic */
8061 if ((source < dd->first_dyn_alloc_ctxt) ||
8062 (rcd->sc && (rcd->sc->type == SC_KERNEL)))
Dean Luickf4f30031c2015-10-26 10:28:44 -04008063 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008064 else
8065 handle_user_interrupt(rcd);
8066 return; /* OK */
8067 }
8068 /* received an interrupt, but no rcd */
8069 err_detail = "dataless";
8070 } else {
8071 /* received an interrupt, but are not using that context */
8072 err_detail = "out of range";
8073 }
8074 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008075 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008076}
8077
8078/*
8079 * RX block receive urgent interrupt. Source is < 160.
8080 */
8081static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8082{
8083 struct hfi1_ctxtdata *rcd;
8084 char *err_detail;
8085
8086 if (likely(source < dd->num_rcv_contexts)) {
8087 rcd = dd->rcd[source];
8088 if (rcd) {
8089 /* only pay attention to user urgent interrupts */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07008090 if ((source >= dd->first_dyn_alloc_ctxt) &&
8091 (!rcd->sc || (rcd->sc->type == SC_USER)))
Mike Marciniszyn77241052015-07-30 15:17:43 -04008092 handle_user_interrupt(rcd);
8093 return; /* OK */
8094 }
8095 /* received an interrupt, but no rcd */
8096 err_detail = "dataless";
8097 } else {
8098 /* received an interrupt, but are not using that context */
8099 err_detail = "out of range";
8100 }
8101 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008102 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008103}
8104
8105/*
8106 * Reserved range interrupt. Should not be called in normal operation.
8107 */
8108static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8109{
8110 char name[64];
8111
8112 dd_dev_err(dd, "unexpected %s interrupt\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008113 is_reserved_name(name, sizeof(name), source));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008114}
8115
8116static const struct is_table is_table[] = {
Jubin John4d114fd2016-02-14 20:21:43 -08008117/*
8118 * start end
8119 * name func interrupt func
8120 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008121{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8122 is_misc_err_name, is_misc_err_int },
8123{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8124 is_sdma_eng_err_name, is_sdma_eng_err_int },
8125{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8126 is_sendctxt_err_name, is_sendctxt_err_int },
8127{ IS_SDMA_START, IS_SDMA_END,
8128 is_sdma_eng_name, is_sdma_eng_int },
8129{ IS_VARIOUS_START, IS_VARIOUS_END,
8130 is_various_name, is_various_int },
8131{ IS_DC_START, IS_DC_END,
8132 is_dc_name, is_dc_int },
8133{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8134 is_rcv_avail_name, is_rcv_avail_int },
8135{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8136 is_rcv_urgent_name, is_rcv_urgent_int },
8137{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8138 is_send_credit_name, is_send_credit_int},
8139{ IS_RESERVED_START, IS_RESERVED_END,
8140 is_reserved_name, is_reserved_int},
8141};
8142
8143/*
8144 * Interrupt source interrupt - called when the given source has an interrupt.
8145 * Source is a bit index into an array of 64-bit integers.
8146 */
8147static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8148{
8149 const struct is_table *entry;
8150
8151 /* avoids a double compare by walking the table in-order */
8152 for (entry = &is_table[0]; entry->is_name; entry++) {
8153 if (source < entry->end) {
8154 trace_hfi1_interrupt(dd, entry, source);
8155 entry->is_int(dd, source - entry->start);
8156 return;
8157 }
8158 }
8159 /* fell off the end */
8160 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8161}
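
/*
 * Illustrative note: the table above is ordered by ascending range and
 * ends with the reserved catch-all, so is_interrupt() only needs the
 * upper-bound test - a source that failed every earlier entry's
 * "source < end" check is already known to be past those ranges.
 */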
8162
8163/*
8164 * General interrupt handler. This is able to correctly handle
8165 * all interrupts in case INTx is used.
8166 */
8167static irqreturn_t general_interrupt(int irq, void *data)
8168{
8169 struct hfi1_devdata *dd = data;
8170 u64 regs[CCE_NUM_INT_CSRS];
8171 u32 bit;
8172 int i;
8173
8174 this_cpu_inc(*dd->int_counter);
8175
8176 /* phase 1: scan and clear all handled interrupts */
8177 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8178 if (dd->gi_mask[i] == 0) {
8179 regs[i] = 0; /* used later */
8180 continue;
8181 }
8182 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8183 dd->gi_mask[i];
8184 /* only clear if anything is set */
8185 if (regs[i])
8186 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8187 }
8188
8189 /* phase 2: call the appropriate handler */
8190 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John17fb4f22016-02-14 20:21:52 -08008191 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008192 is_interrupt(dd, bit);
8193 }
8194
8195 return IRQ_HANDLED;
8196}
8197
8198static irqreturn_t sdma_interrupt(int irq, void *data)
8199{
8200 struct sdma_engine *sde = data;
8201 struct hfi1_devdata *dd = sde->dd;
8202 u64 status;
8203
8204#ifdef CONFIG_SDMA_VERBOSITY
8205 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8206 slashstrip(__FILE__), __LINE__, __func__);
8207 sdma_dumpstate(sde);
8208#endif
8209
8210 this_cpu_inc(*dd->int_counter);
8211
8212 /* This read_csr is really bad in the hot path */
8213 status = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008214 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8215 & sde->imask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008216 if (likely(status)) {
8217 /* clear the interrupt(s) */
8218 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008219 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8220 status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008221
8222 /* handle the interrupt(s) */
8223 sdma_engine_interrupt(sde, status);
Dennis Dalessandroee495ad2017-04-09 10:17:18 -07008224 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008225 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008226 sde->this_idx);
Dennis Dalessandroee495ad2017-04-09 10:17:18 -07008227 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04008228 return IRQ_HANDLED;
8229}
8230
8231/*
Dean Luickecd42f82016-02-03 14:35:14 -08008232 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8233 * to ensure that the write completed. This does NOT guarantee that
8234 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008235 */
8236static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8237{
8238 struct hfi1_devdata *dd = rcd->dd;
8239 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8240
8241 mmiowb(); /* make sure everything before is written */
8242 write_csr(dd, addr, rcd->imask);
8243 /* force the above write on the chip and get a value back */
8244 (void)read_csr(dd, addr);
8245}
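
/*
 * Illustrative note: the (void) read back of the CSR just written is the
 * usual posted-write flush idiom - the read cannot complete until the
 * preceding write has reached the chip, so the interrupt is known to be
 * cleared before the caller re-checks for packets.
 */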
8246
8247/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008248void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008249{
8250 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8251}
8252
Dean Luickecd42f82016-02-03 14:35:14 -08008253/*
8254 * Return non-zero if a packet is present.
8255 *
8256 * This routine is called when rechecking for packets after the RcvAvail
8257 * interrupt has been cleared down. First, do a quick check of memory for
8258 * a packet present. If not found, use an expensive CSR read of the context
8259 * tail to determine the actual tail. The CSR read is necessary because there
8260 * is no method to push pending DMAs to memory other than an interrupt and we
8261 * are trying to determine if we need to force an interrupt.
8262 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008263static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8264{
Dean Luickecd42f82016-02-03 14:35:14 -08008265 u32 tail;
8266 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008267
Dean Luickecd42f82016-02-03 14:35:14 -08008268 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8269 present = (rcd->seq_cnt ==
8270 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8271 else /* is RDMA rtail */
8272 present = (rcd->head != get_rcvhdrtail(rcd));
8273
8274 if (present)
8275 return 1;
8276
8277	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8278 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8279 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008280}
8281
8282/*
8283 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8284 * This routine will try to handle packets immediately (latency), but if
8285 * it finds too many, it will invoke the thread handler (bandwidth). The
Jubin John16733b82016-02-14 20:20:58 -08008286 * chip receive interrupt is *not* cleared down until this or the thread (if
Dean Luickf4f30031c2015-10-26 10:28:44 -04008287 * invoked) is finished. The intent is to avoid extra interrupts while we
8288 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008289 */
8290static irqreturn_t receive_context_interrupt(int irq, void *data)
8291{
8292 struct hfi1_ctxtdata *rcd = data;
8293 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008294 int disposition;
8295 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008296
8297 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8298 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008299 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008300
Dean Luickf4f30031c2015-10-26 10:28:44 -04008301 /* receive interrupt remains blocked while processing packets */
8302 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008303
Dean Luickf4f30031c2015-10-26 10:28:44 -04008304 /*
8305 * Too many packets were seen while processing packets in this
8306 * IRQ handler. Invoke the handler thread. The receive interrupt
8307 * remains blocked.
8308 */
8309 if (disposition == RCV_PKT_LIMIT)
8310 return IRQ_WAKE_THREAD;
8311
8312 /*
8313 * The packet processor detected no more packets. Clear the receive
8314	 * interrupt and recheck for a packet that may have arrived
8315 * after the previous check and interrupt clear. If a packet arrived,
8316 * force another interrupt.
8317 */
8318 clear_recv_intr(rcd);
8319 present = check_packet_present(rcd);
8320 if (present)
8321 force_recv_intr(rcd);
8322
8323 return IRQ_HANDLED;
8324}
8325
8326/*
8327 * Receive packet thread handler. This expects to be invoked with the
8328 * receive interrupt still blocked.
8329 */
8330static irqreturn_t receive_context_thread(int irq, void *data)
8331{
8332 struct hfi1_ctxtdata *rcd = data;
8333 int present;
8334
8335 /* receive interrupt is still blocked from the IRQ handler */
8336 (void)rcd->do_interrupt(rcd, 1);
8337
8338 /*
8339 * The packet processor will only return if it detected no more
8340 * packets. Hold IRQs here so we can safely clear the interrupt and
8341 * recheck for a packet that may have arrived after the previous
8342 * check and the interrupt clear. If a packet arrived, force another
8343 * interrupt.
8344 */
8345 local_irq_disable();
8346 clear_recv_intr(rcd);
8347 present = check_packet_present(rcd);
8348 if (present)
8349 force_recv_intr(rcd);
8350 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008351
8352 return IRQ_HANDLED;
8353}
8354
8355/* ========================================================================= */
8356
8357u32 read_physical_state(struct hfi1_devdata *dd)
8358{
8359 u64 reg;
8360
8361 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8362 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8363 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8364}
8365
Jim Snowfb9036d2016-01-11 18:32:21 -05008366u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008367{
8368 u64 reg;
8369
8370 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8371 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8372 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8373}
8374
8375static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8376{
8377 u64 reg;
8378
8379 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8380 /* clear current state, set new state */
8381 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8382 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8383 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8384}
8385
8386/*
8387 * Use the 8051 to read a LCB CSR.
8388 */
8389static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8390{
8391 u32 regno;
8392 int ret;
8393
8394 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8395 if (acquire_lcb_access(dd, 0) == 0) {
8396 *data = read_csr(dd, addr);
8397 release_lcb_access(dd, 0);
8398 return 0;
8399 }
8400 return -EBUSY;
8401 }
8402
8403 /* register is an index of LCB registers: (offset - base) / 8 */
8404 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8405 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8406 if (ret != HCMD_SUCCESS)
8407 return -EBUSY;
8408 return 0;
8409}
8410
8411/*
Michael J. Ruhl86884262017-03-20 17:24:51 -07008412 * Provide a cache for some of the LCB registers in case the LCB is
8413 * unavailable.
8414 * (The LCB is unavailable in certain link states, for example.)
8415 */
8416struct lcb_datum {
8417 u32 off;
8418 u64 val;
8419};
8420
8421static struct lcb_datum lcb_cache[] = {
8422 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8423 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8424 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8425};
8426
8427static void update_lcb_cache(struct hfi1_devdata *dd)
8428{
8429 int i;
8430 int ret;
8431 u64 val;
8432
8433 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8434 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8435
8436 /* Update if we get good data */
8437 if (likely(ret != -EBUSY))
8438 lcb_cache[i].val = val;
8439 }
8440}
8441
8442static int read_lcb_cache(u32 off, u64 *val)
8443{
8444 int i;
8445
8446 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8447 if (lcb_cache[i].off == off) {
8448 *val = lcb_cache[i].val;
8449 return 0;
8450 }
8451 }
8452
8453 pr_warn("%s bad offset 0x%x\n", __func__, off);
8454 return -1;
8455}
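
/*
 * Illustrative note: extending the cache is only a matter of adding a
 * CSR offset to lcb_cache[] above; update_lcb_cache() snapshots every
 * entry whenever the LCB is readable, and read_lcb_cache() then serves
 * it while the LCB is inaccessible.  For example (hypothetical
 * addition):
 *
 *	{ DC_LCB_ERR_INFO_TOTAL_CRC_ERR, 0 },
 */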
8456
8457/*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008458 * Read an LCB CSR. Access may not be in host control, so check.
8459 * Return 0 on success, -EBUSY on failure.
8460 */
8461int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8462{
8463 struct hfi1_pportdata *ppd = dd->pport;
8464
8465 /* if up, go through the 8051 for the value */
8466 if (ppd->host_link_state & HLS_UP)
8467 return read_lcb_via_8051(dd, addr, data);
Michael J. Ruhl86884262017-03-20 17:24:51 -07008468 /* if going up or down, check the cache, otherwise, no access */
8469 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8470 if (read_lcb_cache(addr, data))
8471 return -EBUSY;
8472 return 0;
8473 }
8474
Mike Marciniszyn77241052015-07-30 15:17:43 -04008475 /* otherwise, host has access */
8476 *data = read_csr(dd, addr);
8477 return 0;
8478}
8479
8480/*
8481 * Use the 8051 to write a LCB CSR.
8482 */
8483static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8484{
Dean Luick3bf40d62015-11-06 20:07:04 -05008485 u32 regno;
8486 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008487
Dean Luick3bf40d62015-11-06 20:07:04 -05008488 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008489 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
Dean Luick3bf40d62015-11-06 20:07:04 -05008490 if (acquire_lcb_access(dd, 0) == 0) {
8491 write_csr(dd, addr, data);
8492 release_lcb_access(dd, 0);
8493 return 0;
8494 }
8495 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008496 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008497
8498 /* register is an index of LCB registers: (offset - base) / 8 */
8499 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8500 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8501 if (ret != HCMD_SUCCESS)
8502 return -EBUSY;
8503 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008504}
8505
8506/*
8507 * Write an LCB CSR. Access may not be in host control, so check.
8508 * Return 0 on success, -EBUSY on failure.
8509 */
8510int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8511{
8512 struct hfi1_pportdata *ppd = dd->pport;
8513
8514 /* if up, go through the 8051 for the value */
8515 if (ppd->host_link_state & HLS_UP)
8516 return write_lcb_via_8051(dd, addr, data);
8517 /* if going up or down, no access */
8518 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8519 return -EBUSY;
8520 /* otherwise, host has access */
8521 write_csr(dd, addr, data);
8522 return 0;
8523}
8524
8525/*
8526 * Returns:
8527 * < 0 = Linux error, not able to get access
8528 * > 0 = 8051 command RETURN_CODE
8529 */
8530static int do_8051_command(
8531 struct hfi1_devdata *dd,
8532 u32 type,
8533 u64 in_data,
8534 u64 *out_data)
8535{
8536 u64 reg, completed;
8537 int return_code;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008538 unsigned long timeout;
8539
8540 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8541
Tadeusz Struk22546b72017-04-28 10:40:02 -07008542 mutex_lock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008543
8544 /* We can't send any commands to the 8051 if it's in reset */
8545 if (dd->dc_shutdown) {
8546 return_code = -ENODEV;
8547 goto fail;
8548 }
8549
8550 /*
8551 * If an 8051 host command timed out previously, then the 8051 is
8552 * stuck.
8553 *
8554 * On first timeout, attempt to reset and restart the entire DC
8555 * block (including 8051). (Is this too big of a hammer?)
8556 *
8557 * If the 8051 times out a second time, the reset did not bring it
8558 * back to healthy life. In that case, fail any subsequent commands.
8559 */
8560 if (dd->dc8051_timed_out) {
8561 if (dd->dc8051_timed_out > 1) {
8562 dd_dev_err(dd,
8563 "Previous 8051 host command timed out, skipping command %u\n",
8564 type);
8565 return_code = -ENXIO;
8566 goto fail;
8567 }
Tadeusz Struk22546b72017-04-28 10:40:02 -07008568 _dc_shutdown(dd);
8569 _dc_start(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008570 }
8571
8572 /*
8573 * If there is no timeout, then the 8051 command interface is
8574 * waiting for a command.
8575 */
8576
8577 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008578	 * When writing an LCB CSR, out_data contains the full value
8579	 * to be written, while in_data contains the relative LCB
8580	 * address in 7:0. Do the work here, rather than in the caller,
8581	 * of distributing the write data to where it needs to go:
8582 *
8583 * Write data
8584 * 39:00 -> in_data[47:8]
8585 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8586 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8587 */
8588 if (type == HCMD_WRITE_LCB_CSR) {
8589 in_data |= ((*out_data) & 0xffffffffffull) << 8;
Dean Luick00801672016-12-07 19:33:40 -08008590 /* must preserve COMPLETED - it is tied to hardware */
8591 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8592 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8593 reg |= ((((*out_data) >> 40) & 0xff) <<
Dean Luick3bf40d62015-11-06 20:07:04 -05008594 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8595 | ((((*out_data) >> 48) & 0xffff) <<
8596 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8597 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8598 }
8599
8600 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008601 * Do two writes: the first to stabilize the type and req_data, the
8602 * second to activate.
8603 */
8604 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8605 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8606 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8607 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8608 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8609 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8610 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8611
8612 /* wait for completion, alternate: interrupt */
8613 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8614 while (1) {
8615 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8616 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8617 if (completed)
8618 break;
8619 if (time_after(jiffies, timeout)) {
8620 dd->dc8051_timed_out++;
8621 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8622 if (out_data)
8623 *out_data = 0;
8624 return_code = -ETIMEDOUT;
8625 goto fail;
8626 }
8627 udelay(2);
8628 }
8629
8630 if (out_data) {
8631 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8632 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8633 if (type == HCMD_READ_LCB_CSR) {
8634 /* top 16 bits are in a different register */
8635 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8636 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8637 << (48
8638 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8639 }
8640 }
8641 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8642 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8643 dd->dc8051_timed_out = 0;
8644 /*
8645 * Clear command for next user.
8646 */
8647 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8648
8649fail:
Tadeusz Struk22546b72017-04-28 10:40:02 -07008650 mutex_unlock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008651 return return_code;
8652}
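
/*
 * Illustrative sketch, not called by the driver: the HCMD_WRITE_LCB_CSR
 * splitting rule documented above, pulled out as a helper (hypothetical
 * name).  Bits 39:0 of the value travel in the command's in_data at
 * [47:8] alongside the LCB index in [7:0]; bits 47:40 and 63:48 are
 * staged in DC8051_CFG_EXT_DEV_0 as RETURN_CODE and RSP_DATA (the
 * driver additionally preserves the COMPLETED bit when it writes that
 * register).
 */
static void __maybe_unused example_split_lcb_write(u64 val, u8 lcb_index,
						   u64 *in_data,
						   u64 *ext_dev_0)
{
	*in_data = (u64)lcb_index | ((val & 0xffffffffffull) << 8);
	*ext_dev_0 = (((val >> 40) & 0xff) <<
		      DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT) |
		     (((val >> 48) & 0xffff) <<
		      DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
}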
8653
8654static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8655{
8656 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8657}
8658
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008659int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8660 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008661{
8662 u64 data;
8663 int ret;
8664
8665 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8666 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8667 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8668 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8669 if (ret != HCMD_SUCCESS) {
8670 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008671 "load 8051 config: field id %d, lane %d, err %d\n",
8672 (int)field_id, (int)lane_id, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008673 }
8674 return ret;
8675}
8676
8677/*
8678 * Read the 8051 firmware "registers". Use the RAM directly. Always
8679 * set the result, even on error.
8680 * Return 0 on success, -errno on failure
8681 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008682int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8683 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008684{
8685 u64 big_data;
8686 u32 addr;
8687 int ret;
8688
8689 /* address start depends on the lane_id */
8690 if (lane_id < 4)
8691 addr = (4 * NUM_GENERAL_FIELDS)
8692 + (lane_id * 4 * NUM_LANE_FIELDS);
8693 else
8694 addr = 0;
8695 addr += field_id * 4;
8696
8697 /* read is in 8-byte chunks, hardware will truncate the address down */
8698 ret = read_8051_data(dd, addr, 8, &big_data);
8699
8700 if (ret == 0) {
8701 /* extract the 4 bytes we want */
8702 if (addr & 0x4)
8703 *result = (u32)(big_data >> 32);
8704 else
8705 *result = (u32)big_data;
8706 } else {
8707 *result = 0;
8708 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008709 __func__, lane_id, field_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008710 }
8711
8712 return ret;
8713}
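
/*
 * Worked example of the addressing above (illustrative only): the 8051
 * RAM holds NUM_GENERAL_FIELDS 4-byte general fields first, then
 * NUM_LANE_FIELDS 4-byte fields per lane.  Lane 2, field 3 therefore
 * starts at byte 4 * NUM_GENERAL_FIELDS + 2 * 4 * NUM_LANE_FIELDS +
 * 3 * 4, while a lane_id >= 4 (such as GENERAL_CONFIG) selects the
 * general area at offset field_id * 4.  The 8-byte read is truncated
 * down to an aligned address, and bit 2 of the offset picks the upper
 * or lower 32 bits of the result.
 */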
8714
8715static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8716 u8 continuous)
8717{
8718 u32 frame;
8719
8720 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8721 | power_management << POWER_MANAGEMENT_SHIFT;
8722 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8723 GENERAL_CONFIG, frame);
8724}
8725
8726static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8727 u16 vl15buf, u8 crc_sizes)
8728{
8729 u32 frame;
8730
8731 frame = (u32)vau << VAU_SHIFT
8732 | (u32)z << Z_SHIFT
8733 | (u32)vcu << VCU_SHIFT
8734 | (u32)vl15buf << VL15BUF_SHIFT
8735 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8736 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8737 GENERAL_CONFIG, frame);
8738}
8739
8740static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8741 u8 *flag_bits, u16 *link_widths)
8742{
8743 u32 frame;
8744
8745 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008746 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008747 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8748 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8749 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8750}
8751
8752static int write_vc_local_link_width(struct hfi1_devdata *dd,
8753 u8 misc_bits,
8754 u8 flag_bits,
8755 u16 link_widths)
8756{
8757 u32 frame;
8758
8759 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8760 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8761 | (u32)link_widths << LINK_WIDTH_SHIFT;
8762 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8763 frame);
8764}
8765
8766static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8767 u8 device_rev)
8768{
8769 u32 frame;
8770
8771 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8772 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8773 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8774}
8775
8776static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8777 u8 *device_rev)
8778{
8779 u32 frame;
8780
8781 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8782 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8783 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8784 & REMOTE_DEVICE_REV_MASK;
8785}
8786
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008787void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8788 u8 *ver_patch)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008789{
8790 u32 frame;
8791
8792 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008793 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8794 STS_FM_VERSION_MAJOR_MASK;
8795 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8796 STS_FM_VERSION_MINOR_MASK;
8797
8798 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8799 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8800 STS_FM_VERSION_PATCH_MASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008801}
8802
8803static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8804 u8 *continuous)
8805{
8806 u32 frame;
8807
8808 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8809 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8810 & POWER_MANAGEMENT_MASK;
8811 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8812 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8813}
8814
8815static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8816 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8817{
8818 u32 frame;
8819
8820 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8821 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8822 *z = (frame >> Z_SHIFT) & Z_MASK;
8823 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8824 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8825 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8826}
8827
8828static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8829 u8 *remote_tx_rate,
8830 u16 *link_widths)
8831{
8832 u32 frame;
8833
8834 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008835 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008836 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8837 & REMOTE_TX_RATE_MASK;
8838 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8839}
8840
8841static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8842{
8843 u32 frame;
8844
8845 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8846 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8847}
8848
8849static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8850{
8851 u32 frame;
8852
8853 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8854 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8855}
8856
8857static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8858{
8859 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8860}
8861
8862static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8863{
8864 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8865}
8866
8867void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8868{
8869 u32 frame;
8870 int ret;
8871
8872 *link_quality = 0;
8873 if (dd->pport->host_link_state & HLS_UP) {
8874 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008875 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008876 if (ret == 0)
8877 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8878 & LINK_QUALITY_MASK;
8879 }
8880}
8881
8882static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8883{
8884 u32 frame;
8885
8886 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8887 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8888}
8889
Dean Luickfeb831d2016-04-14 08:31:36 -07008890static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8891{
8892 u32 frame;
8893
8894 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8895 *ldr = (frame & 0xff);
8896}
8897
Mike Marciniszyn77241052015-07-30 15:17:43 -04008898static int read_tx_settings(struct hfi1_devdata *dd,
8899 u8 *enable_lane_tx,
8900 u8 *tx_polarity_inversion,
8901 u8 *rx_polarity_inversion,
8902 u8 *max_rate)
8903{
8904 u32 frame;
8905 int ret;
8906
8907 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8908 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8909 & ENABLE_LANE_TX_MASK;
8910 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8911 & TX_POLARITY_INVERSION_MASK;
8912 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8913 & RX_POLARITY_INVERSION_MASK;
8914 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8915 return ret;
8916}
8917
8918static int write_tx_settings(struct hfi1_devdata *dd,
8919 u8 enable_lane_tx,
8920 u8 tx_polarity_inversion,
8921 u8 rx_polarity_inversion,
8922 u8 max_rate)
8923{
8924 u32 frame;
8925
8926 /* no need to mask, all variable sizes match field widths */
8927 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8928 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8929 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8930 | max_rate << MAX_RATE_SHIFT;
8931 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8932}
8933
Mike Marciniszyn77241052015-07-30 15:17:43 -04008934/*
8935 * Read an idle LCB message.
8936 *
8937 * Returns 0 on success, -EINVAL on error
8938 */
8939static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8940{
8941 int ret;
8942
Jubin John17fb4f22016-02-14 20:21:52 -08008943 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008944 if (ret != HCMD_SUCCESS) {
8945 dd_dev_err(dd, "read idle message: type %d, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008946 (u32)type, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008947 return -EINVAL;
8948 }
8949 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8950 /* return only the payload as we already know the type */
8951 *data_out >>= IDLE_PAYLOAD_SHIFT;
8952 return 0;
8953}
8954
8955/*
8956 * Read an idle SMA message. To be done in response to a notification from
8957 * the 8051.
8958 *
8959 * Returns 0 on success, -EINVAL on error
8960 */
8961static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8962{
Jubin John17fb4f22016-02-14 20:21:52 -08008963 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8964 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008965}
8966
8967/*
8968 * Send an idle LCB message.
8969 *
8970 * Returns 0 on success, -EINVAL on error
8971 */
8972static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8973{
8974 int ret;
8975
8976 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8977 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8978 if (ret != HCMD_SUCCESS) {
8979 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008980 data, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008981 return -EINVAL;
8982 }
8983 return 0;
8984}
8985
8986/*
8987 * Send an idle SMA message.
8988 *
8989 * Returns 0 on success, -EINVAL on error
8990 */
8991int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8992{
8993 u64 data;
8994
Jubin John17fb4f22016-02-14 20:21:52 -08008995 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8996 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008997 return send_idle_message(dd, data);
8998}
8999
9000/*
9001 * Initialize the LCB then do a quick link up. This may or may not be
9002 * in loopback.
9003 *
9004 * return 0 on success, -errno on error
9005 */
9006static int do_quick_linkup(struct hfi1_devdata *dd)
9007{
Mike Marciniszyn77241052015-07-30 15:17:43 -04009008 int ret;
9009
9010 lcb_shutdown(dd, 0);
9011
9012 if (loopback) {
9013 /* LCB_CFG_LOOPBACK.VAL = 2 */
9014 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9015 write_csr(dd, DC_LCB_CFG_LOOPBACK,
Jubin John17fb4f22016-02-14 20:21:52 -08009016 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009017 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9018 }
9019
9020 /* start the LCBs */
9021 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9022 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9023
9024 /* simulator only loopback steps */
9025 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9026 /* LCB_CFG_RUN.EN = 1 */
9027 write_csr(dd, DC_LCB_CFG_RUN,
Jubin John17fb4f22016-02-14 20:21:52 -08009028 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009029
Dean Luickec8a1422017-03-20 17:24:39 -07009030 ret = wait_link_transfer_active(dd, 10);
9031 if (ret)
9032 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009033
9034 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
Jubin John17fb4f22016-02-14 20:21:52 -08009035 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009036 }
9037
9038 if (!loopback) {
9039 /*
9040 * When doing quick linkup and not in loopback, both
9041 * sides must be done with LCB set-up before either
9042 * starts the quick linkup. Put a delay here so that
9043 * both sides can be started and have a chance to be
9044 * done with LCB set up before resuming.
9045 */
9046 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009047 "Pausing for peer to be finished with LCB set up\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009048 msleep(5000);
Jubin John17fb4f22016-02-14 20:21:52 -08009049 dd_dev_err(dd, "Continuing with quick linkup\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009050 }
9051
9052 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9053 set_8051_lcb_access(dd);
9054
9055 /*
9056 * State "quick" LinkUp request sets the physical link state to
9057 * LinkUp without a verify capability sequence.
9058 * This state is in simulator v37 and later.
9059 */
9060 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9061 if (ret != HCMD_SUCCESS) {
9062 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009063 "%s: set physical link state to quick LinkUp failed with return %d\n",
9064 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009065
9066 set_host_lcb_access(dd);
9067 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9068
9069 if (ret >= 0)
9070 ret = -EINVAL;
9071 return ret;
9072 }
9073
9074 return 0; /* success */
9075}
9076
9077/*
9078 * Set the SerDes to internal loopback mode.
9079 * Returns 0 on success, -errno on error.
9080 */
9081static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9082{
9083 int ret;
9084
9085 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9086 if (ret == HCMD_SUCCESS)
9087 return 0;
9088 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009089 "Set physical link state to SerDes Loopback failed with return %d\n",
9090 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009091 if (ret >= 0)
9092 ret = -EINVAL;
9093 return ret;
9094}
9095
9096/*
9097 * Do all special steps to set up loopback.
9098 */
9099static int init_loopback(struct hfi1_devdata *dd)
9100{
9101 dd_dev_info(dd, "Entering loopback mode\n");
9102
9103 /* all loopbacks should disable self GUID check */
9104 write_csr(dd, DC_DC8051_CFG_MODE,
Jubin John17fb4f22016-02-14 20:21:52 -08009105 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009106
9107 /*
9108 * The simulator has only one loopback option - LCB. Switch
9109 * to that option, which includes quick link up.
9110 *
9111 * Accept all valid loopback values.
9112 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08009113 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9114 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9115 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009116 loopback = LOOPBACK_LCB;
9117 quick_linkup = 1;
9118 return 0;
9119 }
9120
9121 /* handle serdes loopback */
9122 if (loopback == LOOPBACK_SERDES) {
9123 /* internal serdes loopback needs quick linkup on RTL */
9124 if (dd->icode == ICODE_RTL_SILICON)
9125 quick_linkup = 1;
9126 return set_serdes_loopback_mode(dd);
9127 }
9128
9129 /* LCB loopback - handled at poll time */
9130 if (loopback == LOOPBACK_LCB) {
9131 quick_linkup = 1; /* LCB is always quick linkup */
9132
9133 /* not supported in emulation due to emulation RTL changes */
9134 if (dd->icode == ICODE_FPGA_EMULATION) {
9135 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009136 "LCB loopback not supported in emulation\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009137 return -EINVAL;
9138 }
9139 return 0;
9140 }
9141
9142 /* external cable loopback requires no extra steps */
9143 if (loopback == LOOPBACK_CABLE)
9144 return 0;
9145
9146 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9147 return -EINVAL;
9148}
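/*
 * Summary of the loopback handling above:
 *   - functional simulator: any valid mode is forced to LCB with quick
 *     linkup, since the simulator only models LCB loopback
 *   - LOOPBACK_SERDES:      internal SerDes loopback; quick linkup on RTL
 *   - LOOPBACK_LCB:         quick linkup, configured later at poll time;
 *                           not available on FPGA emulation
 *   - LOOPBACK_CABLE:       external cable, no extra steps required
 */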
9149
9150/*
9151 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9152 * used in the Verify Capability link width attribute.
9153 */
9154static u16 opa_to_vc_link_widths(u16 opa_widths)
9155{
9156 int i;
9157 u16 result = 0;
9158
9159 static const struct link_bits {
9160 u16 from;
9161 u16 to;
9162 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08009163 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9164 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9165 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9166 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04009167 };
9168
9169 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9170 if (opa_widths & opa_link_xlate[i].from)
9171 result |= opa_link_xlate[i].to;
9172 }
9173 return result;
9174}
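/*
 * Example of the translation above: the OPA 1X..4X enable bits map to
 * Verify Capability width bits 0..3, so a request of
 * (OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X) becomes 0x9 in the VC frame.
 */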
9175
9176/*
9177 * Set link attributes before moving to polling.
9178 */
9179static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9180{
9181 struct hfi1_devdata *dd = ppd->dd;
9182 u8 enable_lane_tx;
9183 u8 tx_polarity_inversion;
9184 u8 rx_polarity_inversion;
9185 int ret;
9186
9187 /* reset our fabric serdes to clear any lingering problems */
9188 fabric_serdes_reset(dd);
9189
9190 /* set the local tx rate - need to read-modify-write */
9191 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009192 &rx_polarity_inversion, &ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009193 if (ret)
9194 goto set_local_link_attributes_fail;
9195
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07009196 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009197 /* set the tx rate to the fastest enabled */
9198 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9199 ppd->local_tx_rate = 1;
9200 else
9201 ppd->local_tx_rate = 0;
9202 } else {
9203 /* set the tx rate to all enabled */
9204 ppd->local_tx_rate = 0;
9205 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9206 ppd->local_tx_rate |= 2;
9207 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9208 ppd->local_tx_rate |= 1;
9209 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04009210
9211 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009212 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009213 rx_polarity_inversion, ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009214 if (ret != HCMD_SUCCESS)
9215 goto set_local_link_attributes_fail;
9216
9217 /*
9218 * DC supports continuous updates.
9219 */
Jubin John17fb4f22016-02-14 20:21:52 -08009220 ret = write_vc_local_phy(dd,
9221 0 /* no power management */,
9222 1 /* continuous updates */);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009223 if (ret != HCMD_SUCCESS)
9224 goto set_local_link_attributes_fail;
9225
9226 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9227 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9228 ppd->port_crc_mode_enabled);
9229 if (ret != HCMD_SUCCESS)
9230 goto set_local_link_attributes_fail;
9231
9232 ret = write_vc_local_link_width(dd, 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009233 opa_to_vc_link_widths(
9234 ppd->link_width_enabled));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009235 if (ret != HCMD_SUCCESS)
9236 goto set_local_link_attributes_fail;
9237
9238 /* let peer know who we are */
9239 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9240 if (ret == HCMD_SUCCESS)
9241 return 0;
9242
9243set_local_link_attributes_fail:
9244 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009245 "Failed to set local link attributes, return 0x%x\n",
9246 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009247 return ret;
9248}
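/*
 * The writes above stage the local link settings in the 8051 before the
 * port moves to Polling: tx rate and polarity, local phy and fabric
 * parameters, the enabled link widths, and finally the local device id
 * that identifies this HFI to the peer.
 */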
9249
9250/*
Easwar Hariharan623bba22016-04-12 11:25:57 -07009251 * Call this to start the link.
9252 * Do not do anything if the link is disabled.
9253 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009254 */
9255int start_link(struct hfi1_pportdata *ppd)
9256{
Dean Luick0db9dec2016-09-06 04:35:20 -07009257 /*
9258 * Tune the SerDes to a ballpark setting for optimal signal and bit
9259 * error rate. Needs to be done before starting the link.
9260 */
9261 tune_serdes(ppd);
9262
Mike Marciniszyn77241052015-07-30 15:17:43 -04009263 if (!ppd->link_enabled) {
9264 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009265 "%s: stopping link start because link is disabled\n",
9266 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009267 return 0;
9268 }
9269 if (!ppd->driver_link_ready) {
9270 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009271 "%s: stopping link start because driver is not ready\n",
9272 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009273 return 0;
9274 }
9275
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07009276 /*
9277 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9278 * pkey table can be configured properly if the HFI unit is connected
9279 * to switch port with MgmtAllowed=NO
9280 */
9281 clear_full_mgmt_pkey(ppd);
9282
Easwar Hariharan623bba22016-04-12 11:25:57 -07009283 return set_link_state(ppd, HLS_DN_POLL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009284}
9285
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009286static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9287{
9288 struct hfi1_devdata *dd = ppd->dd;
9289 u64 mask;
9290 unsigned long timeout;
9291
9292 /*
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009293 * Some QSFP cables have a quirk that asserts the IntN line as a side
9294 * effect of power up on plug-in. We ignore this false positive
9295 * interrupt until the module has finished powering up by waiting for
9296 * a minimum timeout of the module inrush initialization time of
9297 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9298 * module have stabilized.
9299 */
9300 msleep(500);
9301
9302 /*
9303 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009304 */
9305 timeout = jiffies + msecs_to_jiffies(2000);
9306 while (1) {
9307 mask = read_csr(dd, dd->hfi1_id ?
9308 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009309 if (!(mask & QSFP_HFI0_INT_N))
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009310 break;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009311 if (time_after(jiffies, timeout)) {
9312 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9313 __func__);
9314 break;
9315 }
9316 udelay(2);
9317 }
9318}
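/*
 * The loop above polls the QSFP IN register for deassertion of IntN
 * (active low) for up to 2 seconds in 2 usec steps, after an initial
 * 500 ms sleep to cover the module inrush/power-up window.
 */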
9319
9320static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9321{
9322 struct hfi1_devdata *dd = ppd->dd;
9323 u64 mask;
9324
9325 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009326 if (enable) {
9327 /*
9328 * Clear the status register to avoid an immediate interrupt
9329 * when we re-enable the IntN pin
9330 */
9331 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9332 QSFP_HFI0_INT_N);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009333 mask |= (u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009334 } else {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009335 mask &= ~(u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009336 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009337 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9338}
9339
9340void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009341{
9342 struct hfi1_devdata *dd = ppd->dd;
9343 u64 mask, qsfp_mask;
9344
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009345 /* Disable INT_N from triggering QSFP interrupts */
9346 set_qsfp_int_n(ppd, 0);
9347
9348 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009349 mask = (u64)QSFP_HFI0_RESET_N;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009350
9351 qsfp_mask = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009352 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009353 qsfp_mask &= ~mask;
9354 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009355 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009356
9357 udelay(10);
9358
9359 qsfp_mask |= mask;
9360 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009361 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009362
9363 wait_for_qsfp_init(ppd);
9364
9365 /*
9366 * Allow INT_N to trigger the QSFP interrupt to watch
9367 * for alarms and warnings
9368 */
9369 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009370}
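/*
 * reset_qsfp() sequence: mask IntN so the reset does not raise a QSFP
 * interrupt, pulse ResetN low (active low) for ~10 usec via the OUT
 * register, wait for the module to finish initializing, then re-enable
 * IntN so alarms and warnings are reported again.
 */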
9371
9372static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9373 u8 *qsfp_interrupt_status)
9374{
9375 struct hfi1_devdata *dd = ppd->dd;
9376
9377 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009378 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
Neel Desai03e80e92017-04-09 10:16:47 -07009379 dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
Jubin John17fb4f22016-02-14 20:21:52 -08009380 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009381
9382 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009383 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9384 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9385 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009386
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009387 /*
9388 * The remaining alarms/warnings don't matter if the link is down.
9389 */
9390 if (ppd->host_link_state & HLS_DOWN)
9391 return 0;
9392
Mike Marciniszyn77241052015-07-30 15:17:43 -04009393 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009394 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9395 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9396 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009397
9398 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009399 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9400 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9401 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009402
9403 /* Byte 2 is vendor specific */
9404
9405 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009406 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9407 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9408 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009409
9410 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009411 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9412 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9413 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009414
9415 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009416 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9417 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9418 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009419
9420 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009421 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9422 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9423 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009424
9425 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009426 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9427 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9428 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009429
9430 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009431 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9432 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9433 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009434
9435 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009436 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9437 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9438 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009439
9440 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009441 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9442 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9443 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009444
9445 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009446 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9447 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9448 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009449
9450 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009451 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9452 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9453 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009454
9455 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009456 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9457 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9458 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009459
9460 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009461 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9462 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9463 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009464
9465 /* Bytes 9-10 and 11-12 are reserved */
9466 /* Bytes 13-15 are vendor specific */
9467
9468 return 0;
9469}
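/*
 * The byte indices decoded above correspond to the block of QSFP
 * interrupt/flag bytes read in qsfp_event() (one_qsfp_read() at offset 6,
 * length 16): byte 0 temperature, byte 1 supply voltage, bytes 3-4 RX
 * power, bytes 5-6 TX bias and bytes 7-8 TX power, with byte 2 and
 * bytes 9-15 vendor specific or reserved as noted above.
 */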
9470
Easwar Hariharan623bba22016-04-12 11:25:57 -07009471/* This routine will only be scheduled if the QSFP module present signal is asserted */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009472void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009473{
9474 struct qsfp_data *qd;
9475 struct hfi1_pportdata *ppd;
9476 struct hfi1_devdata *dd;
9477
9478 qd = container_of(work, struct qsfp_data, qsfp_work);
9479 ppd = qd->ppd;
9480 dd = ppd->dd;
9481
9482 /* Sanity check */
9483 if (!qsfp_mod_present(ppd))
9484 return;
9485
9486 /*
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009487 * Turn DC back on after cable has been re-inserted. Up until
9488 * now, the DC has been in reset to save power.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009489 */
9490 dc_start(dd);
9491
9492 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009493 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009494
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009495 wait_for_qsfp_init(ppd);
9496
9497 /*
9498 * Allow INT_N to trigger the QSFP interrupt to watch
9499 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009500 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009501 set_qsfp_int_n(ppd, 1);
9502
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009503 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009504 }
9505
9506 if (qd->check_interrupt_flags) {
9507 u8 qsfp_interrupt_status[16] = {0,};
9508
Dean Luick765a6fa2016-03-05 08:50:06 -08009509 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9510 &qsfp_interrupt_status[0], 16) != 16) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009511 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009512 "%s: Failed to read status of QSFP module\n",
9513 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009514 } else {
9515 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009516
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009517 handle_qsfp_error_conditions(
9518 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009519 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9520 ppd->qsfp_info.check_interrupt_flags = 0;
9521 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08009522 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009523 }
9524 }
9525}
9526
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009527static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009528{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009529 struct hfi1_pportdata *ppd = dd->pport;
9530 u64 qsfp_mask, cce_int_mask;
9531 const int qsfp1_int_smask = QSFP1_INT % 64;
9532 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009533
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009534 /*
9535 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9536 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9537 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9538 * the index of the appropriate CSR in the CCEIntMask CSR array
9539 */
9540 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9541 (8 * (QSFP1_INT / 64)));
9542 if (dd->hfi1_id) {
9543 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9544 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9545 cce_int_mask);
9546 } else {
9547 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9548 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9549 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009550 }
9551
Mike Marciniszyn77241052015-07-30 15:17:43 -04009552 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9553 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009554 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9555 qsfp_mask);
9556 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9557 qsfp_mask);
9558
9559 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009560
9561 /* Handle active low nature of INT_N and MODPRST_N pins */
9562 if (qsfp_mod_present(ppd))
9563 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9564 write_csr(dd,
9565 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9566 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009567}
9568
Dean Luickbbdeb332015-12-01 15:38:15 -05009569/*
9570 * Do a one-time initialize of the LCB block.
9571 */
9572static void init_lcb(struct hfi1_devdata *dd)
9573{
Dean Luicka59329d2016-02-03 14:32:31 -08009574 /* simulator does not correctly handle LCB cclk loopback, skip */
9575 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9576 return;
9577
Dean Luickbbdeb332015-12-01 15:38:15 -05009578 /* the DC has been reset earlier in the driver load */
9579
9580 /* set LCB for cclk loopback on the port */
9581 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9582 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9583 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9584 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9585 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9586 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9587 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9588}
9589
Dean Luick673b9752016-08-31 07:24:33 -07009590/*
9591 * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9592 * on error.
9593 */
9594static int test_qsfp_read(struct hfi1_pportdata *ppd)
9595{
9596 int ret;
9597 u8 status;
9598
Easwar Hariharanfb897ad2017-03-20 17:25:42 -07009599 /*
9600 * Report success if the port is not a QSFP, or if it is a QSFP but
9601 * the cable is not present
9602 */
9603 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
Dean Luick673b9752016-08-31 07:24:33 -07009604 return 0;
9605
9606 /* read byte 2, the status byte */
9607 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9608 if (ret < 0)
9609 return ret;
9610 if (ret != 1)
9611 return -EIO;
9612
9613 return 0; /* success */
9614}
9615
9616/*
9617 * Values for QSFP retry.
9618 *
9619 * Give up after 10s (20 x 500ms). The overall timeout was empirically
9620 * arrived at from experience on a large cluster.
9621 */
9622#define MAX_QSFP_RETRIES 20
9623#define QSFP_RETRY_WAIT 500 /* msec */
9624
9625/*
9626 * Try a QSFP read. If it fails, schedule a retry for later.
9627 * Called on first link activation after driver load.
9628 */
9629static void try_start_link(struct hfi1_pportdata *ppd)
9630{
9631 if (test_qsfp_read(ppd)) {
9632 /* read failed */
9633 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9634 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9635 return;
9636 }
9637 dd_dev_info(ppd->dd,
9638 "QSFP not responding, waiting and retrying %d\n",
9639 (int)ppd->qsfp_retry_count);
9640 ppd->qsfp_retry_count++;
9641 queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
9642 msecs_to_jiffies(QSFP_RETRY_WAIT));
9643 return;
9644 }
9645 ppd->qsfp_retry_count = 0;
9646
Dean Luick673b9752016-08-31 07:24:33 -07009647 start_link(ppd);
9648}
9649
9650/*
9651 * Workqueue function to start the link after a delay.
9652 */
9653void handle_start_link(struct work_struct *work)
9654{
9655 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9656 start_link_work.work);
9657 try_start_link(ppd);
9658}
9659
Mike Marciniszyn77241052015-07-30 15:17:43 -04009660int bringup_serdes(struct hfi1_pportdata *ppd)
9661{
9662 struct hfi1_devdata *dd = ppd->dd;
9663 u64 guid;
9664 int ret;
9665
9666 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9667 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9668
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07009669 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
Mike Marciniszyn77241052015-07-30 15:17:43 -04009670 if (!guid) {
9671 if (dd->base_guid)
9672 guid = dd->base_guid + ppd->port - 1;
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07009673 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009674 }
9675
Mike Marciniszyn77241052015-07-30 15:17:43 -04009676 /* Set linkinit_reason on power up per OPA spec */
9677 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9678
Dean Luickbbdeb332015-12-01 15:38:15 -05009679 /* one-time init of the LCB */
9680 init_lcb(dd);
9681
Mike Marciniszyn77241052015-07-30 15:17:43 -04009682 if (loopback) {
9683 ret = init_loopback(dd);
9684 if (ret < 0)
9685 return ret;
9686 }
9687
Easwar Hariharan9775a992016-05-12 10:22:39 -07009688 get_port_type(ppd);
9689 if (ppd->port_type == PORT_TYPE_QSFP) {
9690 set_qsfp_int_n(ppd, 0);
9691 wait_for_qsfp_init(ppd);
9692 set_qsfp_int_n(ppd, 1);
9693 }
9694
Dean Luick673b9752016-08-31 07:24:33 -07009695 try_start_link(ppd);
9696 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009697}
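/*
 * bringup_serdes() order of operations: optionally enable extended-PSN
 * receive handling, pick the port GUID (falling back to base GUID +
 * port - 1), record the power-up link init reason, do the one-time LCB
 * init, set up loopback if requested, settle the QSFP if this is a QSFP
 * port, and then make the first link start attempt via try_start_link().
 */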
9698
9699void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9700{
9701 struct hfi1_devdata *dd = ppd->dd;
9702
9703 /*
9704 * Shut down the link and keep it down. First clear the flag that says
9705 * the driver wants to allow the link to be up (driver_link_ready).
9706 * Then make sure the link is not automatically restarted
9707 * (link_enabled). Cancel any pending restart. And finally
9708 * go offline.
9709 */
9710 ppd->driver_link_ready = 0;
9711 ppd->link_enabled = 0;
9712
Dean Luick673b9752016-08-31 07:24:33 -07009713 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9714 flush_delayed_work(&ppd->start_link_work);
9715 cancel_delayed_work_sync(&ppd->start_link_work);
9716
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009717 ppd->offline_disabled_reason =
9718 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009719 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009720 OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009721 set_link_state(ppd, HLS_DN_OFFLINE);
9722
9723 /* disable the port */
9724 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9725}
9726
9727static inline int init_cpu_counters(struct hfi1_devdata *dd)
9728{
9729 struct hfi1_pportdata *ppd;
9730 int i;
9731
9732 ppd = (struct hfi1_pportdata *)(dd + 1);
9733 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009734 ppd->ibport_data.rvp.rc_acks = NULL;
9735 ppd->ibport_data.rvp.rc_qacks = NULL;
9736 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9737 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9738 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9739 if (!ppd->ibport_data.rvp.rc_acks ||
9740 !ppd->ibport_data.rvp.rc_delayed_comp ||
9741 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009742 return -ENOMEM;
9743 }
9744
9745 return 0;
9746}
9747
9748static const char * const pt_names[] = {
9749 "expected",
9750 "eager",
9751 "invalid"
9752};
9753
9754static const char *pt_name(u32 type)
9755{
9756 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9757}
9758
9759/*
9760 * index is the index into the receive array
9761 */
9762void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9763 u32 type, unsigned long pa, u16 order)
9764{
9765 u64 reg;
9766 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9767 (dd->kregbase + RCV_ARRAY));
9768
9769 if (!(dd->flags & HFI1_PRESENT))
9770 goto done;
9771
9772 if (type == PT_INVALID) {
9773 pa = 0;
9774 } else if (type > PT_INVALID) {
9775 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009776 "unexpected receive array type %u for index %u, not handled\n",
9777 type, index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009778 goto done;
9779 }
9780
9781 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9782 pt_name(type), index, pa, (unsigned long)order);
9783
9784#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9785 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9786 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9787 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9788 << RCV_ARRAY_RT_ADDR_SHIFT;
9789 writeq(reg, base + (index * 8));
9790
9791 if (type == PT_EAGER)
9792 /*
9793 * Eager entries are written one-by-one so we have to push them
9794 * after we write the entry.
9795 */
9796 flush_wc();
9797done:
9798 return;
9799}
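/*
 * A worked example of the RcvArray encoding above (values are
 * illustrative only): an eager buffer at pa = 0x12345000 with order = 0
 * produces reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK | (0 << BUF_SIZE_SHIFT)
 * | ((0x12345000 >> 12) << ADDR_SHIFT), i.e. the 4 KB-aligned physical
 * page number is what lands in the address field.
 */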
9800
9801void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9802{
9803 struct hfi1_devdata *dd = rcd->dd;
9804 u32 i;
9805
9806 /* this could be optimized */
9807 for (i = rcd->eager_base; i < rcd->eager_base +
9808 rcd->egrbufs.alloced; i++)
9809 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9810
9811 for (i = rcd->expected_base;
9812 i < rcd->expected_base + rcd->expected_count; i++)
9813 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9814}
9815
Mike Marciniszyn77241052015-07-30 15:17:43 -04009816static const char * const ib_cfg_name_strings[] = {
9817 "HFI1_IB_CFG_LIDLMC",
9818 "HFI1_IB_CFG_LWID_DG_ENB",
9819 "HFI1_IB_CFG_LWID_ENB",
9820 "HFI1_IB_CFG_LWID",
9821 "HFI1_IB_CFG_SPD_ENB",
9822 "HFI1_IB_CFG_SPD",
9823 "HFI1_IB_CFG_RXPOL_ENB",
9824 "HFI1_IB_CFG_LREV_ENB",
9825 "HFI1_IB_CFG_LINKLATENCY",
9826 "HFI1_IB_CFG_HRTBT",
9827 "HFI1_IB_CFG_OP_VLS",
9828 "HFI1_IB_CFG_VL_HIGH_CAP",
9829 "HFI1_IB_CFG_VL_LOW_CAP",
9830 "HFI1_IB_CFG_OVERRUN_THRESH",
9831 "HFI1_IB_CFG_PHYERR_THRESH",
9832 "HFI1_IB_CFG_LINKDEFAULT",
9833 "HFI1_IB_CFG_PKEYS",
9834 "HFI1_IB_CFG_MTU",
9835 "HFI1_IB_CFG_LSTATE",
9836 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9837 "HFI1_IB_CFG_PMA_TICKS",
9838 "HFI1_IB_CFG_PORT"
9839};
9840
9841static const char *ib_cfg_name(int which)
9842{
9843 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9844 return "invalid";
9845 return ib_cfg_name_strings[which];
9846}
9847
9848int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9849{
9850 struct hfi1_devdata *dd = ppd->dd;
9851 int val = 0;
9852
9853 switch (which) {
9854 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9855 val = ppd->link_width_enabled;
9856 break;
9857 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9858 val = ppd->link_width_active;
9859 break;
9860 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9861 val = ppd->link_speed_enabled;
9862 break;
9863 case HFI1_IB_CFG_SPD: /* current Link speed */
9864 val = ppd->link_speed_active;
9865 break;
9866
9867 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9868 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9869 case HFI1_IB_CFG_LINKLATENCY:
9870 goto unimplemented;
9871
9872 case HFI1_IB_CFG_OP_VLS:
9873 val = ppd->vls_operational;
9874 break;
9875 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9876 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9877 break;
9878 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9879 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9880 break;
9881 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9882 val = ppd->overrun_threshold;
9883 break;
9884 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9885 val = ppd->phy_error_threshold;
9886 break;
9887 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9888 val = dd->link_default;
9889 break;
9890
9891 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9892 case HFI1_IB_CFG_PMA_TICKS:
9893 default:
9894unimplemented:
9895 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9896 dd_dev_info(
9897 dd,
9898 "%s: which %s: not implemented\n",
9899 __func__,
9900 ib_cfg_name(which));
9901 break;
9902 }
9903
9904 return val;
9905}
9906
9907/*
9908 * The largest MAD packet size.
9909 */
9910#define MAX_MAD_PACKET 2048
9911
9912/*
9913 * Return the maximum header bytes that can go on the _wire_
9914 * for this device. This count includes the ICRC which is
9915 * not part of the packet held in memory but it is appended
9916 * by the HW.
9917 * This is dependent on the device's receive header entry size.
9918 * HFI allows this to be set per-receive context, but the
9919 * driver presently enforces a global value.
9920 */
9921u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9922{
9923 /*
9924 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9925 * the Receive Header Entry Size minus the PBC (or RHF) size
9926 * plus one DW for the ICRC appended by HW.
9927 *
9928 * dd->rcd[0].rcvhdrqentsize is in DW.
9929 * We use rcd[0] as all contexts will have the same value. Also,
9930 * the first kernel context would have been allocated by now so
9931 * we are guaranteed a valid value.
9932 */
9933 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9934}
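/*
 * Example of the calculation above (the rcvhdrqentsize value is
 * illustrative only): with a receive header entry size of 32 DWs, the
 * largest wire header is (32 - 2 + 1) * 4 = 124 bytes, including the
 * HW-appended ICRC.
 */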
9935
9936/*
9937 * Set Send Length
9938 * @ppd - per port data
9939 *
9940 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9941 * registers compare against LRH.PktLen, so use the max bytes included
9942 * in the LRH.
9943 *
9944 * This routine changes all VL values except VL15, which it maintains at
9945 * the same value.
9946 */
9947static void set_send_length(struct hfi1_pportdata *ppd)
9948{
9949 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009950 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9951 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009952 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9953 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9954 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
Jubin Johnb4ba6632016-06-09 07:51:08 -07009955 int i, j;
Jianxin Xiong44306f12016-04-12 11:30:28 -07009956 u32 thres;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009957
9958 for (i = 0; i < ppd->vls_supported; i++) {
9959 if (dd->vld[i].mtu > maxvlmtu)
9960 maxvlmtu = dd->vld[i].mtu;
9961 if (i <= 3)
9962 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9963 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9964 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9965 else
9966 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9967 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9968 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9969 }
9970 write_csr(dd, SEND_LEN_CHECK0, len1);
9971 write_csr(dd, SEND_LEN_CHECK1, len2);
9972 /* adjust kernel credit return thresholds based on new MTUs */
9973 /* all kernel receive contexts have the same hdrqentsize */
9974 for (i = 0; i < ppd->vls_supported; i++) {
Jianxin Xiong44306f12016-04-12 11:30:28 -07009975 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9976 sc_mtu_to_threshold(dd->vld[i].sc,
9977 dd->vld[i].mtu,
Jubin John17fb4f22016-02-14 20:21:52 -08009978 dd->rcd[0]->rcvhdrqentsize));
Jubin Johnb4ba6632016-06-09 07:51:08 -07009979 for (j = 0; j < INIT_SC_PER_VL; j++)
9980 sc_set_cr_threshold(
9981 pio_select_send_context_vl(dd, j, i),
9982 thres);
Jianxin Xiong44306f12016-04-12 11:30:28 -07009983 }
9984 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9985 sc_mtu_to_threshold(dd->vld[15].sc,
9986 dd->vld[15].mtu,
9987 dd->rcd[0]->rcvhdrqentsize));
9988 sc_set_cr_threshold(dd->vld[15].sc, thres);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009989
9990 /* Adjust maximum MTU for the port in DC */
9991 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9992 (ilog2(maxvlmtu >> 8) + 1);
9993 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9994 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9995 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9996 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9997 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9998}
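/*
 * Notes on set_send_length() above: VLs 0-3 share SEND_LEN_CHECK0 and
 * VLs 4-7 plus VL15 share SEND_LEN_CHECK1, four length fields per CSR.
 * The DC MTU cap is then encoded as DCC_CFG_PORT_MTU_CAP_10240 for a
 * 10240-byte max MTU, otherwise as ilog2(maxvlmtu >> 8) + 1 (e.g. a
 * 4096-byte MTU encodes as 5).
 */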
9999
10000static void set_lidlmc(struct hfi1_pportdata *ppd)
10001{
10002 int i;
10003 u64 sreg = 0;
10004 struct hfi1_devdata *dd = ppd->dd;
10005 u32 mask = ~((1U << ppd->lmc) - 1);
10006 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10007
Mike Marciniszyn77241052015-07-30 15:17:43 -040010008 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10009 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10010 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
Jubin John8638b772016-02-14 20:19:24 -080010011 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
Mike Marciniszyn77241052015-07-30 15:17:43 -040010012 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10013 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10014 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10015
10016 /*
10017 * Iterate over all the send contexts and set their SLID check
10018 */
10019 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10020 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10021 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10022 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10023
10024 for (i = 0; i < dd->chip_send_contexts; i++) {
10025 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10026 i, (u32)sreg);
10027 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10028 }
10029
10030 /* Now we have to do the same thing for the sdma engines */
10031 sdma_update_lmc(dd, mask, ppd->lid);
10032}
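/*
 * The LMC handling above: mask = ~((1 << lmc) - 1) clears the low LMC
 * bits, so with an LMC of 2 the DLID/SLID checks ignore the bottom two
 * LID bits and the port answers to a block of 4 consecutive LIDs.
 */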
10033
Dean Luick6854c692016-07-25 13:38:56 -070010034static const char *state_completed_string(u32 completed)
10035{
10036 static const char * const state_completed[] = {
10037 "EstablishComm",
10038 "OptimizeEQ",
10039 "VerifyCap"
10040 };
10041
10042 if (completed < ARRAY_SIZE(state_completed))
10043 return state_completed[completed];
10044
10045 return "unknown";
10046}
10047
10048static const char all_lanes_dead_timeout_expired[] =
10049 "All lanes were inactive – was the interconnect media removed?";
10050static const char tx_out_of_policy[] =
10051 "Passing lanes on local port do not meet the local link width policy";
10052static const char no_state_complete[] =
10053 "State timeout occurred before link partner completed the state";
10054static const char * const state_complete_reasons[] = {
10055 [0x00] = "Reason unknown",
10056 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10057 [0x02] = "Link partner reported failure",
10058 [0x10] = "Unable to achieve frame sync on any lane",
10059 [0x11] =
10060 "Unable to find a common bit rate with the link partner",
10061 [0x12] =
10062 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10063 [0x13] =
10064 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10065 [0x14] = no_state_complete,
10066 [0x15] =
10067 "State timeout occurred before link partner identified equalization presets",
10068 [0x16] =
10069 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10070 [0x17] = tx_out_of_policy,
10071 [0x20] = all_lanes_dead_timeout_expired,
10072 [0x21] =
10073 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10074 [0x22] = no_state_complete,
10075 [0x23] =
10076 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10077 [0x24] = tx_out_of_policy,
10078 [0x30] = all_lanes_dead_timeout_expired,
10079 [0x31] =
10080 "State timeout occurred waiting for host to process received frames",
10081 [0x32] = no_state_complete,
10082 [0x33] =
10083 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10084 [0x34] = tx_out_of_policy,
10085};
10086
10087static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10088 u32 code)
10089{
10090 const char *str = NULL;
10091
10092 if (code < ARRAY_SIZE(state_complete_reasons))
10093 str = state_complete_reasons[code];
10094
10095 if (str)
10096 return str;
10097 return "Reserved";
10098}
10099
10100/* describe the given last state complete frame */
10101static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10102 const char *prefix)
10103{
10104 struct hfi1_devdata *dd = ppd->dd;
10105 u32 success;
10106 u32 state;
10107 u32 reason;
10108 u32 lanes;
10109
10110 /*
10111 * Decode frame:
10112 * [ 0: 0] - success
10113 * [ 3: 1] - state
10114 * [ 7: 4] - next state timeout
10115 * [15: 8] - reason code
10116 * [31:16] - lanes
10117 */
10118 success = frame & 0x1;
10119 state = (frame >> 1) & 0x7;
10120 reason = (frame >> 8) & 0xff;
10121 lanes = (frame >> 16) & 0xffff;
10122
10123 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10124 prefix, frame);
10125 dd_dev_err(dd, " last reported state: %s (0x%x)\n",
10126 state_completed_string(state), state);
10127 dd_dev_err(dd, " state successfully completed: %s\n",
10128 success ? "yes" : "no");
10129 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10130 reason, state_complete_reason_code_string(ppd, reason));
10131 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
10132}
10133
10134/*
10135 * Read the last state complete frames and explain them. This routine
10136 * expects to be called if the link went down during link negotiation
10137 * and initialization (LNI). That is, anywhere between polling and link up.
10138 */
10139static void check_lni_states(struct hfi1_pportdata *ppd)
10140{
10141 u32 last_local_state;
10142 u32 last_remote_state;
10143
10144 read_last_local_state(ppd->dd, &last_local_state);
10145 read_last_remote_state(ppd->dd, &last_remote_state);
10146
10147 /*
10148 * Don't report anything if there is nothing to report. A value of
10149 * 0 means the link was taken down while polling and there was no
10150 * training in-process.
10151 */
10152 if (last_local_state == 0 && last_remote_state == 0)
10153 return;
10154
10155 decode_state_complete(ppd, last_local_state, "transmitted");
10156 decode_state_complete(ppd, last_remote_state, "received");
10157}
10158
Dean Luickec8a1422017-03-20 17:24:39 -070010159/* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10160static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10161{
10162 u64 reg;
10163 unsigned long timeout;
10164
10165 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10166 timeout = jiffies + msecs_to_jiffies(wait_ms);
10167 while (1) {
10168 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10169 if (reg)
10170 break;
10171 if (time_after(jiffies, timeout)) {
10172 dd_dev_err(dd,
10173 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10174 return -ETIMEDOUT;
10175 }
10176 udelay(2);
10177 }
10178 return 0;
10179}
10180
10181/* called when the logical link state is not down as it should be */
10182static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10183{
10184 struct hfi1_devdata *dd = ppd->dd;
10185
10186 /*
10187 * Bring link up in LCB loopback
10188 */
10189 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10190 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10191 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10192
10193 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10194 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10195 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10196 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10197
10198 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10199 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10200 udelay(3);
10201 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10202 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10203
10204 wait_link_transfer_active(dd, 100);
10205
10206 /*
10207 * Bring the link down again.
10208 */
10209 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10210 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10211 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10212
10213 /* call again to adjust ppd->statusp, if needed */
10214 get_logical_state(ppd);
10215}
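/*
 * force_logical_link_state_down() is a recovery path used from
 * goto_offline() when the logical link state fails to reach DOWN: it
 * briefly brings the link up in LCB loopback and then takes it back
 * down, which nudges the logical state machine into the expected state.
 */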
10216
Mike Marciniszyn77241052015-07-30 15:17:43 -040010217/*
10218 * Helper for set_link_state(). Do not call except from that routine.
10219 * Expects ppd->hls_mutex to be held.
10220 *
10221 * @rem_reason value to be sent to the neighbor
10222 *
10223 * LinkDownReasons only set if transition succeeds.
10224 */
10225static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10226{
10227 struct hfi1_devdata *dd = ppd->dd;
10228 u32 pstate, previous_state;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010229 int ret;
10230 int do_transition;
10231 int do_wait;
10232
Michael J. Ruhl86884262017-03-20 17:24:51 -070010233 update_lcb_cache(dd);
10234
Mike Marciniszyn77241052015-07-30 15:17:43 -040010235 previous_state = ppd->host_link_state;
10236 ppd->host_link_state = HLS_GOING_OFFLINE;
10237 pstate = read_physical_state(dd);
10238 if (pstate == PLS_OFFLINE) {
10239 do_transition = 0; /* in right state */
10240 do_wait = 0; /* ...no need to wait */
Jakub Byczkowski02d10082017-05-04 05:13:58 -070010241 } else if ((pstate & 0xf0) == PLS_OFFLINE) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010242 do_transition = 0; /* in an offline transient state */
10243 do_wait = 1; /* ...wait for it to settle */
10244 } else {
10245 do_transition = 1; /* need to move to offline */
10246 do_wait = 1; /* ...will need to wait */
10247 }
10248
10249 if (do_transition) {
10250 ret = set_physical_link_state(dd,
Harish Chegondibf640092016-03-05 08:49:29 -080010251 (rem_reason << 8) | PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010252
10253 if (ret != HCMD_SUCCESS) {
10254 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010255 "Failed to transition to Offline link state, return %d\n",
10256 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010257 return -EINVAL;
10258 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010259 if (ppd->offline_disabled_reason ==
10260 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010261 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -080010262 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010263 }
10264
10265 if (do_wait) {
10266 /* it can take a while for the link to go down */
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010267 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010268 if (ret < 0)
10269 return ret;
10270 }
10271
Mike Marciniszyn77241052015-07-30 15:17:43 -040010272 /*
10273 * Now in charge of LCB - must be after the physical state is
10274 * offline.quiet and before host_link_state is changed.
10275 */
10276 set_host_lcb_access(dd);
10277 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
Dean Luickec8a1422017-03-20 17:24:39 -070010278
10279 /* make sure the logical state is also down */
10280 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10281 if (ret)
10282 force_logical_link_state_down(ppd);
10283
Mike Marciniszyn77241052015-07-30 15:17:43 -040010284 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10285
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010286 if (ppd->port_type == PORT_TYPE_QSFP &&
10287 ppd->qsfp_info.limiting_active &&
10288 qsfp_mod_present(ppd)) {
Dean Luick765a6fa2016-03-05 08:50:06 -080010289 int ret;
10290
10291 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10292 if (ret == 0) {
10293 set_qsfp_tx(ppd, 0);
10294 release_chip_resource(dd, qsfp_resource(dd));
10295 } else {
10296 /* not fatal, but should warn */
10297 dd_dev_err(dd,
10298 "Unable to acquire lock to turn off QSFP TX\n");
10299 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010300 }
10301
Mike Marciniszyn77241052015-07-30 15:17:43 -040010302 /*
10303 * The LNI has a mandatory wait time after the physical state
10304 * moves to Offline.Quiet. The wait time may be different
10305 * depending on how the link went down. The 8051 firmware
10306 * will observe the needed wait time and only move to ready
10307 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -050010308 * is 6s, so wait that long and then at least 0.5s more for
10309 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -040010310 */
Dean Luick05087f3b2015-12-01 15:38:16 -050010311 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010312 if (ret) {
10313 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010314 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010315 /* state is really offline, so make it so */
10316 ppd->host_link_state = HLS_DN_OFFLINE;
10317 return ret;
10318 }
10319
10320 /*
10321 * The state is now offline and the 8051 is ready to accept host
10322 * requests.
10323 * - change our state
10324 * - notify others if we were previously in a linkup state
10325 */
10326 ppd->host_link_state = HLS_DN_OFFLINE;
10327 if (previous_state & HLS_UP) {
10328 /* went down while link was up */
10329 handle_linkup_change(dd, 0);
10330 } else if (previous_state
10331 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10332 /* went down while attempting link up */
Dean Luick6854c692016-07-25 13:38:56 -070010333 check_lni_states(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010334 }
10335
10336 /* the active link width (downgrade) is 0 on link down */
10337 ppd->link_width_active = 0;
10338 ppd->link_width_downgrade_tx_active = 0;
10339 ppd->link_width_downgrade_rx_active = 0;
10340 ppd->current_egress_rate = 0;
10341 return 0;
10342}
10343
10344/* return the link state name */
10345static const char *link_state_name(u32 state)
10346{
10347 const char *name;
10348 int n = ilog2(state);
10349 static const char * const names[] = {
10350 [__HLS_UP_INIT_BP] = "INIT",
10351 [__HLS_UP_ARMED_BP] = "ARMED",
10352 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10353 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10354 [__HLS_DN_POLL_BP] = "POLL",
10355 [__HLS_DN_DISABLE_BP] = "DISABLE",
10356 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10357 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10358 [__HLS_GOING_UP_BP] = "GOING_UP",
10359 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10360 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10361 };
10362
10363 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10364 return name ? name : "unknown";
10365}
10366
10367/* return the link state reason name */
10368static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10369{
10370 if (state == HLS_UP_INIT) {
10371 switch (ppd->linkinit_reason) {
10372 case OPA_LINKINIT_REASON_LINKUP:
10373 return "(LINKUP)";
10374 case OPA_LINKINIT_REASON_FLAPPING:
10375 return "(FLAPPING)";
10376 case OPA_LINKINIT_OUTSIDE_POLICY:
10377 return "(OUTSIDE_POLICY)";
10378 case OPA_LINKINIT_QUARANTINED:
10379 return "(QUARANTINED)";
10380 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10381 return "(INSUFIC_CAPABILITY)";
10382 default:
10383 break;
10384 }
10385 }
10386 return "";
10387}
10388
10389/*
10390 * driver_physical_state - convert the driver's notion of a port's
10391 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10392 * Return -1 (converted to a u32) to indicate error.
10393 */
10394u32 driver_physical_state(struct hfi1_pportdata *ppd)
10395{
10396 switch (ppd->host_link_state) {
10397 case HLS_UP_INIT:
10398 case HLS_UP_ARMED:
10399 case HLS_UP_ACTIVE:
10400 return IB_PORTPHYSSTATE_LINKUP;
10401 case HLS_DN_POLL:
10402 return IB_PORTPHYSSTATE_POLLING;
10403 case HLS_DN_DISABLE:
10404 return IB_PORTPHYSSTATE_DISABLED;
10405 case HLS_DN_OFFLINE:
10406 return OPA_PORTPHYSSTATE_OFFLINE;
10407 case HLS_VERIFY_CAP:
10408 return IB_PORTPHYSSTATE_POLLING;
10409 case HLS_GOING_UP:
10410 return IB_PORTPHYSSTATE_POLLING;
10411 case HLS_GOING_OFFLINE:
10412 return OPA_PORTPHYSSTATE_OFFLINE;
10413 case HLS_LINK_COOLDOWN:
10414 return OPA_PORTPHYSSTATE_OFFLINE;
10415 case HLS_DN_DOWNDEF:
10416 default:
10417 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10418 ppd->host_link_state);
10419 return -1;
10420 }
10421}
10422
10423/*
10424 * driver_logical_state - convert the driver's notion of a port's
10425 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10426 * (converted to a u32) to indicate error.
10427 */
10428u32 driver_logical_state(struct hfi1_pportdata *ppd)
10429{
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -070010430 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010431 return IB_PORT_DOWN;
10432
10433 switch (ppd->host_link_state & HLS_UP) {
10434 case HLS_UP_INIT:
10435 return IB_PORT_INIT;
10436 case HLS_UP_ARMED:
10437 return IB_PORT_ARMED;
10438 case HLS_UP_ACTIVE:
10439 return IB_PORT_ACTIVE;
10440 default:
10441 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10442 ppd->host_link_state);
10443 return -1;
10444 }
10445}
10446
10447void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10448 u8 neigh_reason, u8 rem_reason)
10449{
10450 if (ppd->local_link_down_reason.latest == 0 &&
10451 ppd->neigh_link_down_reason.latest == 0) {
10452 ppd->local_link_down_reason.latest = lcl_reason;
10453 ppd->neigh_link_down_reason.latest = neigh_reason;
10454 ppd->remote_link_down_reason = rem_reason;
10455 }
10456}
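/*
 * set_link_down_reason() only records a reason when none has been
 * captured yet (both .latest fields are zero), so the original cause of
 * a link-down sequence is preserved and not overwritten by later
 * transitions.
 */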
10457
10458/*
10459 * Change the physical and/or logical link state.
10460 *
10461 * Do not call this routine while inside an interrupt. It contains
10462 * calls to routines that can take multiple seconds to finish.
10463 *
10464 * Returns 0 on success, -errno on failure.
10465 */
10466int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10467{
10468 struct hfi1_devdata *dd = ppd->dd;
10469 struct ib_event event = {.device = NULL};
10470 int ret1, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010471 int orig_new_state, poll_bounce;
10472
10473 mutex_lock(&ppd->hls_lock);
10474
10475 orig_new_state = state;
10476 if (state == HLS_DN_DOWNDEF)
10477 state = dd->link_default;
10478
10479 /* interpret poll -> poll as a link bounce */
Jubin Johnd0d236e2016-02-14 20:20:15 -080010480 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10481 state == HLS_DN_POLL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010482
10483 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -080010484 link_state_name(ppd->host_link_state),
10485 link_state_name(orig_new_state),
10486 poll_bounce ? "(bounce) " : "",
10487 link_state_reason_name(ppd, state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010488
Mike Marciniszyn77241052015-07-30 15:17:43 -040010489 /*
10490 * If we're going to a (HLS_*) link state that implies the logical
10491 * link state is neither IB_PORT_ARMED nor IB_PORT_ACTIVE, then
10492 * reset is_sm_config_started to 0.
10493 */
10494 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10495 ppd->is_sm_config_started = 0;
10496
10497 /*
10498 * Do nothing if the states match. Let a poll-to-poll link bounce
10499 * go through.
10500 */
10501 if (ppd->host_link_state == state && !poll_bounce)
10502 goto done;
10503
10504 switch (state) {
10505 case HLS_UP_INIT:
Jubin Johnd0d236e2016-02-14 20:20:15 -080010506 if (ppd->host_link_state == HLS_DN_POLL &&
10507 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010508 /*
10509 * Quick link up jumps from polling to here.
10510 *
10511 * Whether in normal or loopback mode, the
10512 * simulator jumps from polling to link up.
10513 * Accept that here.
10514 */
Jubin John17fb4f22016-02-14 20:21:52 -080010515 /* OK */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010516 } else if (ppd->host_link_state != HLS_GOING_UP) {
10517 goto unexpected;
10518 }
10519
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010520 /*
10521 * Wait for Link_Up physical state.
10522 * Physical and Logical states should already be
10523 * transitioned to LinkUp and LinkInit, respectively.
10524 */
10525 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10526 if (ret) {
10527 dd_dev_err(dd,
10528 "%s: physical state did not change to LINK-UP\n",
10529 __func__);
10530 break;
10531 }
10532
Mike Marciniszyn77241052015-07-30 15:17:43 -040010533 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10534 if (ret) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010535 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010536 "%s: logical state did not change to INIT\n",
10537 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010538 } else {
10539 /* clear old transient LINKINIT_REASON code */
10540 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10541 ppd->linkinit_reason =
10542 OPA_LINKINIT_REASON_LINKUP;
10543
10544 /* enable the port */
10545 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10546
10547 handle_linkup_change(dd, 1);
Stuart Summers98b9ee22017-04-09 10:16:53 -070010548 ppd->host_link_state = HLS_UP_INIT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010549 }
10550 break;
10551 case HLS_UP_ARMED:
10552 if (ppd->host_link_state != HLS_UP_INIT)
10553 goto unexpected;
10554
10555 ppd->host_link_state = HLS_UP_ARMED;
10556 set_logical_state(dd, LSTATE_ARMED);
10557 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10558 if (ret) {
10559 /* logical state didn't change, stay at init */
10560 ppd->host_link_state = HLS_UP_INIT;
10561 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010562 "%s: logical state did not change to ARMED\n",
10563 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010564 }
10565 /*
10566 * The simulator does not currently implement SMA messages,
10567 * so neighbor_normal is not set. Set it here when we first
10568 * move to Armed.
10569 */
10570 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10571 ppd->neighbor_normal = 1;
10572 break;
10573 case HLS_UP_ACTIVE:
10574 if (ppd->host_link_state != HLS_UP_ARMED)
10575 goto unexpected;
10576
10577 ppd->host_link_state = HLS_UP_ACTIVE;
10578 set_logical_state(dd, LSTATE_ACTIVE);
10579 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10580 if (ret) {
10581 /* logical state didn't change, stay at armed */
10582 ppd->host_link_state = HLS_UP_ARMED;
10583 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010584 "%s: logical state did not change to ACTIVE\n",
10585 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010586 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010587 /* tell all engines to go running */
10588 sdma_all_running(dd);
10589
10590 /* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010591 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010592 event.element.port_num = ppd->port;
10593 event.event = IB_EVENT_PORT_ACTIVE;
10594 }
10595 break;
10596 case HLS_DN_POLL:
10597 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10598 ppd->host_link_state == HLS_DN_OFFLINE) &&
10599 dd->dc_shutdown)
10600 dc_start(dd);
10601 /* Hand LED control to the DC */
10602 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10603
10604 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10605 u8 tmp = ppd->link_enabled;
10606
10607 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10608 if (ret) {
10609 ppd->link_enabled = tmp;
10610 break;
10611 }
10612 ppd->remote_link_down_reason = 0;
10613
10614 if (ppd->driver_link_ready)
10615 ppd->link_enabled = 1;
10616 }
10617
Jim Snowfb9036d2016-01-11 18:32:21 -050010618 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010619 ret = set_local_link_attributes(ppd);
10620 if (ret)
10621 break;
10622
10623 ppd->port_error_action = 0;
10624 ppd->host_link_state = HLS_DN_POLL;
10625
10626 if (quick_linkup) {
10627 /* quick linkup does not go into polling */
10628 ret = do_quick_linkup(dd);
10629 } else {
10630 ret1 = set_physical_link_state(dd, PLS_POLLING);
10631 if (ret1 != HCMD_SUCCESS) {
10632 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010633 "Failed to transition to Polling link state, return 0x%x\n",
10634 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010635 ret = -EINVAL;
10636 }
10637 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010638 ppd->offline_disabled_reason =
10639 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010640 /*
10641 * If an error occurred above, go back to offline. The
10642 * caller may reschedule another attempt.
10643 */
10644 if (ret)
10645 goto_offline(ppd, 0);
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010646 else
10647 cache_physical_state(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010648 break;
10649 case HLS_DN_DISABLE:
10650 /* link is disabled */
10651 ppd->link_enabled = 0;
10652
10653 /* allow any state to transition to disabled */
10654
10655 /* must transition to offline first */
10656 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10657 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10658 if (ret)
10659 break;
10660 ppd->remote_link_down_reason = 0;
10661 }
10662
Michael J. Ruhldb069ec2017-02-08 05:28:13 -080010663 if (!dd->dc_shutdown) {
10664 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10665 if (ret1 != HCMD_SUCCESS) {
10666 dd_dev_err(dd,
10667 "Failed to transition to Disabled link state, return 0x%x\n",
10668 ret1);
10669 ret = -EINVAL;
10670 break;
10671 }
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010672 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10673 if (ret) {
10674 dd_dev_err(dd,
10675 "%s: physical state did not change to DISABLED\n",
10676 __func__);
10677 break;
10678 }
Michael J. Ruhldb069ec2017-02-08 05:28:13 -080010679 dc_shutdown(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010680 }
10681 ppd->host_link_state = HLS_DN_DISABLE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010682 break;
10683 case HLS_DN_OFFLINE:
10684 if (ppd->host_link_state == HLS_DN_DISABLE)
10685 dc_start(dd);
10686
10687 /* allow any state to transition to offline */
10688 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10689 if (!ret)
10690 ppd->remote_link_down_reason = 0;
10691 break;
10692 case HLS_VERIFY_CAP:
10693 if (ppd->host_link_state != HLS_DN_POLL)
10694 goto unexpected;
10695 ppd->host_link_state = HLS_VERIFY_CAP;
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010696 cache_physical_state(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010697 break;
10698 case HLS_GOING_UP:
10699 if (ppd->host_link_state != HLS_VERIFY_CAP)
10700 goto unexpected;
10701
10702 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10703 if (ret1 != HCMD_SUCCESS) {
10704 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010705 "Failed to transition to link up state, return 0x%x\n",
10706 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010707 ret = -EINVAL;
10708 break;
10709 }
10710 ppd->host_link_state = HLS_GOING_UP;
10711 break;
10712
10713 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10714 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10715 default:
10716 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010717 __func__, state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010718 ret = -EINVAL;
10719 break;
10720 }
10721
Mike Marciniszyn77241052015-07-30 15:17:43 -040010722 goto done;
10723
10724unexpected:
10725 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010726 __func__, link_state_name(ppd->host_link_state),
10727 link_state_name(state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010728 ret = -EINVAL;
10729
10730done:
10731 mutex_unlock(&ppd->hls_lock);
10732
10733 if (event.device)
10734 ib_dispatch_event(&event);
10735
10736 return ret;
10737}
10738
10739int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10740{
10741 u64 reg;
10742 int ret = 0;
10743
10744 switch (which) {
10745 case HFI1_IB_CFG_LIDLMC:
10746 set_lidlmc(ppd);
10747 break;
10748 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10749 /*
10750 * The VL Arbitrator high limit is sent in units of 4k
10751 * bytes, while HFI stores it in units of 64 bytes.
10752 */
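		/*
		 * Worked example (illustrative values only): an FM value of
		 * 2 means 2 * 4 KB = 8 KB, which is stored below as
		 * 2 * (4096 / 64) = 128 units of 64 bytes.
		 */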
Jubin John8638b772016-02-14 20:19:24 -080010753 val *= 4096 / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010754 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10755 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10756 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10757 break;
10758 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10759 /* HFI only supports POLL as the default link down state */
10760 if (val != HLS_DN_POLL)
10761 ret = -EINVAL;
10762 break;
10763 case HFI1_IB_CFG_OP_VLS:
10764 if (ppd->vls_operational != val) {
10765 ppd->vls_operational = val;
10766 if (!ppd->port)
10767 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010768 }
10769 break;
10770 /*
10771 * For link width, link width downgrade, and speed enable, always AND
10772 * the setting with what is actually supported. This has two benefits.
10773 * First, enabled can't have unsupported values, no matter what the
10774 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10775 * "fill in with your supported value" have all the bits in the
10776 * field set, so simply ANDing with supported has the desired result.
10777 */
10778 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10779 ppd->link_width_enabled = val & ppd->link_width_supported;
10780 break;
10781 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10782 ppd->link_width_downgrade_enabled =
10783 val & ppd->link_width_downgrade_supported;
10784 break;
10785 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10786 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10787 break;
10788 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10789 /*
10790 * HFI does not follow IB specs, save this value
10791 * so we can report it, if asked.
10792 */
10793 ppd->overrun_threshold = val;
10794 break;
10795 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10796 /*
10797 * HFI does not follow IB specs, save this value
10798 * so we can report it, if asked.
10799 */
10800 ppd->phy_error_threshold = val;
10801 break;
10802
10803 case HFI1_IB_CFG_MTU:
10804 set_send_length(ppd);
10805 break;
10806
10807 case HFI1_IB_CFG_PKEYS:
10808 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10809 set_partition_keys(ppd);
10810 break;
10811
10812 default:
10813 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10814 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010815 "%s: which %s, val 0x%x: not implemented\n",
10816 __func__, ib_cfg_name(which), val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010817 break;
10818 }
10819 return ret;
10820}
10821
10822/* begin functions related to vl arbitration table caching */
10823static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10824{
10825 int i;
10826
10827 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10828 VL_ARB_LOW_PRIO_TABLE_SIZE);
10829 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10830 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10831
10832 /*
10833 * Note that we always return values directly from the
10834 * 'vl_arb_cache' (and do no CSR reads) in response to a
10835 * 'Get(VLArbTable)'. This is obviously correct after a
10836 * 'Set(VLArbTable)', since the cache will then be up to
10837 * date. But it's also correct prior to any 'Set(VLArbTable)'
10838 * since then both the cache, and the relevant h/w registers
10839 * will be zeroed.
10840 */
10841
10842 for (i = 0; i < MAX_PRIO_TABLE; i++)
10843 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10844}
10845
10846/*
10847 * vl_arb_lock_cache
10848 *
10849 * All other vl_arb_* functions should be called only after locking
10850 * the cache.
10851 */
10852static inline struct vl_arb_cache *
10853vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10854{
10855 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10856 return NULL;
10857 spin_lock(&ppd->vl_arb_cache[idx].lock);
10858 return &ppd->vl_arb_cache[idx];
10859}
10860
10861static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10862{
10863 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10864}
10865
10866static void vl_arb_get_cache(struct vl_arb_cache *cache,
10867 struct ib_vl_weight_elem *vl)
10868{
10869 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10870}
10871
10872static void vl_arb_set_cache(struct vl_arb_cache *cache,
10873 struct ib_vl_weight_elem *vl)
10874{
10875 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10876}
10877
10878static int vl_arb_match_cache(struct vl_arb_cache *cache,
10879 struct ib_vl_weight_elem *vl)
10880{
10881 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10882}
Jubin Johnf4d507c2016-02-14 20:20:25 -080010883
Mike Marciniszyn77241052015-07-30 15:17:43 -040010884/* end functions related to vl arbitration table caching */
10885
10886static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10887 u32 size, struct ib_vl_weight_elem *vl)
10888{
10889 struct hfi1_devdata *dd = ppd->dd;
10890 u64 reg;
10891 unsigned int i, is_up = 0;
10892 int drain, ret = 0;
10893
10894 mutex_lock(&ppd->hls_lock);
10895
10896 if (ppd->host_link_state & HLS_UP)
10897 is_up = 1;
10898
10899 drain = !is_ax(dd) && is_up;
10900
10901 if (drain)
10902 /*
10903 * Before adjusting VL arbitration weights, empty per-VL
10904 * FIFOs, otherwise a packet whose VL weight is being
10905 * set to 0 could get stuck in a FIFO with no chance to
10906 * egress.
10907 */
10908 ret = stop_drain_data_vls(dd);
10909
10910 if (ret) {
10911 dd_dev_err(
10912 dd,
10913 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10914 __func__);
10915 goto err;
10916 }
10917
10918 for (i = 0; i < size; i++, vl++) {
10919 /*
10920 * NOTE: The low priority shift and mask are used here, but
10921 * they are the same for both the low and high registers.
10922 */
10923 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10924 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10925 | (((u64)vl->weight
10926 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10927 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10928 write_csr(dd, target + (i * 8), reg);
10929 }
10930 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10931
10932 if (drain)
10933 open_fill_data_vls(dd); /* reopen all VLs */
10934
10935err:
10936 mutex_unlock(&ppd->hls_lock);
10937
10938 return ret;
10939}
10940
10941/*
10942 * Read one credit merge VL register.
10943 */
10944static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10945 struct vl_limit *vll)
10946{
10947 u64 reg = read_csr(dd, csr);
10948
10949 vll->dedicated = cpu_to_be16(
10950 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10951 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10952 vll->shared = cpu_to_be16(
10953 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10954 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10955}
10956
10957/*
10958 * Read the current credit merge limits.
10959 */
10960static int get_buffer_control(struct hfi1_devdata *dd,
10961 struct buffer_control *bc, u16 *overall_limit)
10962{
10963 u64 reg;
10964 int i;
10965
10966 /* not all entries are filled in */
10967 memset(bc, 0, sizeof(*bc));
10968
10969 /* OPA and HFI have a 1-1 mapping */
10970 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080010971 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010972
10973 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10974 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10975
10976 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10977 bc->overall_shared_limit = cpu_to_be16(
10978 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10979 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10980 if (overall_limit)
10981 *overall_limit = (reg
10982 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10983 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10984 return sizeof(struct buffer_control);
10985}
10986
10987static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10988{
10989 u64 reg;
10990 int i;
10991
10992 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10993 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10994 for (i = 0; i < sizeof(u64); i++) {
10995 u8 byte = *(((u8 *)&reg) + i);
10996
10997 dp->vlnt[2 * i] = byte & 0xf;
10998 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10999 }
11000
11001 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11002 for (i = 0; i < sizeof(u64); i++) {
11003 u8 byte = *(((u8 *)&reg) + i);
11004
11005 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11006 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11007 }
11008 return sizeof(struct sc2vlnt);
11009}
11010
11011static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11012 struct ib_vl_weight_elem *vl)
11013{
11014 unsigned int i;
11015
11016 for (i = 0; i < nelems; i++, vl++) {
11017 vl->vl = 0xf;
11018 vl->weight = 0;
11019 }
11020}
11021
11022static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11023{
11024 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
Jubin John17fb4f22016-02-14 20:21:52 -080011025 DC_SC_VL_VAL(15_0,
11026 0, dp->vlnt[0] & 0xf,
11027 1, dp->vlnt[1] & 0xf,
11028 2, dp->vlnt[2] & 0xf,
11029 3, dp->vlnt[3] & 0xf,
11030 4, dp->vlnt[4] & 0xf,
11031 5, dp->vlnt[5] & 0xf,
11032 6, dp->vlnt[6] & 0xf,
11033 7, dp->vlnt[7] & 0xf,
11034 8, dp->vlnt[8] & 0xf,
11035 9, dp->vlnt[9] & 0xf,
11036 10, dp->vlnt[10] & 0xf,
11037 11, dp->vlnt[11] & 0xf,
11038 12, dp->vlnt[12] & 0xf,
11039 13, dp->vlnt[13] & 0xf,
11040 14, dp->vlnt[14] & 0xf,
11041 15, dp->vlnt[15] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011042 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
Jubin John17fb4f22016-02-14 20:21:52 -080011043 DC_SC_VL_VAL(31_16,
11044 16, dp->vlnt[16] & 0xf,
11045 17, dp->vlnt[17] & 0xf,
11046 18, dp->vlnt[18] & 0xf,
11047 19, dp->vlnt[19] & 0xf,
11048 20, dp->vlnt[20] & 0xf,
11049 21, dp->vlnt[21] & 0xf,
11050 22, dp->vlnt[22] & 0xf,
11051 23, dp->vlnt[23] & 0xf,
11052 24, dp->vlnt[24] & 0xf,
11053 25, dp->vlnt[25] & 0xf,
11054 26, dp->vlnt[26] & 0xf,
11055 27, dp->vlnt[27] & 0xf,
11056 28, dp->vlnt[28] & 0xf,
11057 29, dp->vlnt[29] & 0xf,
11058 30, dp->vlnt[30] & 0xf,
11059 31, dp->vlnt[31] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011060}
11061
11062static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11063 u16 limit)
11064{
11065 if (limit != 0)
11066 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011067 what, (int)limit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011068}
11069
11070/* change only the shared limit portion of SendCmGlobalCredit */
11071static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11072{
11073 u64 reg;
11074
11075 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11076 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11077 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11078 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11079}
11080
11081/* change only the total credit limit portion of SendCmGLobalCredit */
11082static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11083{
11084 u64 reg;
11085
11086 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11087 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11088 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11089 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11090}
11091
11092/* set the given per-VL shared limit */
11093static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11094{
11095 u64 reg;
11096 u32 addr;
11097
11098 if (vl < TXE_NUM_DATA_VL)
11099 addr = SEND_CM_CREDIT_VL + (8 * vl);
11100 else
11101 addr = SEND_CM_CREDIT_VL15;
11102
11103 reg = read_csr(dd, addr);
11104 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11105 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11106 write_csr(dd, addr, reg);
11107}
11108
11109/* set the given per-VL dedicated limit */
11110static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11111{
11112 u64 reg;
11113 u32 addr;
11114
11115 if (vl < TXE_NUM_DATA_VL)
11116 addr = SEND_CM_CREDIT_VL + (8 * vl);
11117 else
11118 addr = SEND_CM_CREDIT_VL15;
11119
11120 reg = read_csr(dd, addr);
11121 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11122 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11123 write_csr(dd, addr, reg);
11124}
11125
11126/* spin until the given per-VL status mask bits clear */
11127static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11128 const char *which)
11129{
11130 unsigned long timeout;
11131 u64 reg;
11132
11133 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11134 while (1) {
11135 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11136
11137 if (reg == 0)
11138 return; /* success */
11139 if (time_after(jiffies, timeout))
11140 break; /* timed out */
11141 udelay(1);
11142 }
11143
11144 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011145 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11146 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011147 /*
11148 * If this occurs, it is likely there was a credit loss on the link.
11149 * The only recovery from that is a link bounce.
11150 */
11151 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011152 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011153}
11154
11155/*
11156 * The number of credits on the VLs may be changed while everything
11157 * is "live", but the following algorithm must be followed due to
11158 * how the hardware is actually implemented. In particular,
11159 * Return_Credit_Status[] is the only correct status check.
11160 *
11161 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11162 * set Global_Shared_Credit_Limit = 0
11163 * use_all_vl = 1
11164 * mask0 = all VLs that are changing either dedicated or shared limits
11165 * set Shared_Limit[mask0] = 0
11166 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11167 * if (changing any dedicated limit)
11168 * mask1 = all VLs that are lowering dedicated limits
11169 * lower Dedicated_Limit[mask1]
11170 * spin until Return_Credit_Status[mask1] == 0
11171 * raise Dedicated_Limits
11172 * raise Shared_Limits
11173 * raise Global_Shared_Credit_Limit
11174 *
11175 * lower = if the new limit is lower, set the limit to the new value
11176 * raise = if the new limit is higher than the current value (may be changed
11177 * earlier in the algorithm), set the new limit to the new value
11178 */
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011179int set_buffer_control(struct hfi1_pportdata *ppd,
11180 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011181{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011182 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011183 u64 changing_mask, ld_mask, stat_mask;
11184 int change_count;
11185 int i, use_all_mask;
11186 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011187 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011188 /*
11189 * A0: add the variable any_shared_limit_changing below and in the
11190 * algorithm above. If removing A0 support, it can be removed.
11191 */
11192 int any_shared_limit_changing;
11193 struct buffer_control cur_bc;
11194 u8 changing[OPA_MAX_VLS];
11195 u8 lowering_dedicated[OPA_MAX_VLS];
11196 u16 cur_total;
11197 u32 new_total = 0;
11198 const u64 all_mask =
11199 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11200 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11201 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11202 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11203 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11204 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11205 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11206 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11207 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11208
11209#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11210#define NUM_USABLE_VLS 16 /* look at VL15 and less */
11211
Mike Marciniszyn77241052015-07-30 15:17:43 -040011212 /* find the new total credits, do sanity check on unused VLs */
11213 for (i = 0; i < OPA_MAX_VLS; i++) {
11214 if (valid_vl(i)) {
11215 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11216 continue;
11217 }
11218 nonzero_msg(dd, i, "dedicated",
Jubin John17fb4f22016-02-14 20:21:52 -080011219 be16_to_cpu(new_bc->vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011220 nonzero_msg(dd, i, "shared",
Jubin John17fb4f22016-02-14 20:21:52 -080011221 be16_to_cpu(new_bc->vl[i].shared));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011222 new_bc->vl[i].dedicated = 0;
11223 new_bc->vl[i].shared = 0;
11224 }
11225 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050011226
Mike Marciniszyn77241052015-07-30 15:17:43 -040011227 /* fetch the current values */
11228 get_buffer_control(dd, &cur_bc, &cur_total);
11229
11230 /*
11231 * Create the masks we will use.
11232 */
11233 memset(changing, 0, sizeof(changing));
11234 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
Jubin John4d114fd2016-02-14 20:21:43 -080011235 /*
11236 * NOTE: Assumes that the individual VL bits are adjacent and in
11237 * increasing order
11238 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011239 stat_mask =
11240 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11241 changing_mask = 0;
11242 ld_mask = 0;
11243 change_count = 0;
11244 any_shared_limit_changing = 0;
11245 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11246 if (!valid_vl(i))
11247 continue;
11248 this_shared_changing = new_bc->vl[i].shared
11249 != cur_bc.vl[i].shared;
11250 if (this_shared_changing)
11251 any_shared_limit_changing = 1;
Jubin Johnd0d236e2016-02-14 20:20:15 -080011252 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11253 this_shared_changing) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011254 changing[i] = 1;
11255 changing_mask |= stat_mask;
11256 change_count++;
11257 }
11258 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11259 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11260 lowering_dedicated[i] = 1;
11261 ld_mask |= stat_mask;
11262 }
11263 }
11264
11265 /* bracket the credit change with a total adjustment */
11266 if (new_total > cur_total)
11267 set_global_limit(dd, new_total);
11268
11269 /*
11270 * Start the credit change algorithm.
11271 */
11272 use_all_mask = 0;
11273 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011274 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11275 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011276 set_global_shared(dd, 0);
11277 cur_bc.overall_shared_limit = 0;
11278 use_all_mask = 1;
11279 }
11280
11281 for (i = 0; i < NUM_USABLE_VLS; i++) {
11282 if (!valid_vl(i))
11283 continue;
11284
11285 if (changing[i]) {
11286 set_vl_shared(dd, i, 0);
11287 cur_bc.vl[i].shared = 0;
11288 }
11289 }
11290
11291 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
Jubin John17fb4f22016-02-14 20:21:52 -080011292 "shared");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011293
11294 if (change_count > 0) {
11295 for (i = 0; i < NUM_USABLE_VLS; i++) {
11296 if (!valid_vl(i))
11297 continue;
11298
11299 if (lowering_dedicated[i]) {
11300 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011301 be16_to_cpu(new_bc->
11302 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011303 cur_bc.vl[i].dedicated =
11304 new_bc->vl[i].dedicated;
11305 }
11306 }
11307
11308 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11309
11310 /* now raise all dedicated that are going up */
11311 for (i = 0; i < NUM_USABLE_VLS; i++) {
11312 if (!valid_vl(i))
11313 continue;
11314
11315 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11316 be16_to_cpu(cur_bc.vl[i].dedicated))
11317 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011318 be16_to_cpu(new_bc->
11319 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011320 }
11321 }
11322
11323 /* next raise all shared that are going up */
11324 for (i = 0; i < NUM_USABLE_VLS; i++) {
11325 if (!valid_vl(i))
11326 continue;
11327
11328 if (be16_to_cpu(new_bc->vl[i].shared) >
11329 be16_to_cpu(cur_bc.vl[i].shared))
11330 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11331 }
11332
11333 /* finally raise the global shared */
11334 if (be16_to_cpu(new_bc->overall_shared_limit) >
Jubin John17fb4f22016-02-14 20:21:52 -080011335 be16_to_cpu(cur_bc.overall_shared_limit))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011336 set_global_shared(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011337 be16_to_cpu(new_bc->overall_shared_limit));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011338
11339 /* bracket the credit change with a total adjustment */
11340 if (new_total < cur_total)
11341 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011342
11343 /*
11344 * Determine the actual number of operational VLS using the number of
11345 * dedicated and shared credits for each VL.
11346 */
11347 if (change_count > 0) {
11348 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11349 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11350 be16_to_cpu(new_bc->vl[i].shared) > 0)
11351 vl_count++;
11352 ppd->actual_vls_operational = vl_count;
11353 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11354 ppd->actual_vls_operational :
11355 ppd->vls_operational,
11356 NULL);
11357 if (ret == 0)
11358 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11359 ppd->actual_vls_operational :
11360 ppd->vls_operational, NULL);
11361 if (ret)
11362 return ret;
11363 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011364 return 0;
11365}
11366
11367/*
11368 * Read the given fabric manager table. Return the size of the
11369 * table (in bytes) on success, and a negative error code on
11370 * failure.
11371 */
11372int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11373
11374{
11375 int size;
11376 struct vl_arb_cache *vlc;
11377
11378 switch (which) {
11379 case FM_TBL_VL_HIGH_ARB:
11380 size = 256;
11381 /*
11382 * OPA specifies 128 elements (of 2 bytes each), though
11383 * HFI supports only 16 elements in h/w.
11384 */
11385 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11386 vl_arb_get_cache(vlc, t);
11387 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11388 break;
11389 case FM_TBL_VL_LOW_ARB:
11390 size = 256;
11391 /*
11392 * OPA specifies 128 elements (of 2 bytes each), though
11393 * HFI supports only 16 elements in h/w.
11394 */
11395 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11396 vl_arb_get_cache(vlc, t);
11397 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11398 break;
11399 case FM_TBL_BUFFER_CONTROL:
11400 size = get_buffer_control(ppd->dd, t, NULL);
11401 break;
11402 case FM_TBL_SC2VLNT:
11403 size = get_sc2vlnt(ppd->dd, t);
11404 break;
11405 case FM_TBL_VL_PREEMPT_ELEMS:
11406 size = 256;
11407 /* OPA specifies 128 elements, of 2 bytes each */
11408 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11409 break;
11410 case FM_TBL_VL_PREEMPT_MATRIX:
11411 size = 256;
11412 /*
11413 * OPA specifies that this is the same size as the VL
11414 * arbitration tables (i.e., 256 bytes).
11415 */
11416 break;
11417 default:
11418 return -EINVAL;
11419 }
11420 return size;
11421}
11422
11423/*
11424 * Write the given fabric manager table.
11425 */
11426int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11427{
11428 int ret = 0;
11429 struct vl_arb_cache *vlc;
11430
11431 switch (which) {
11432 case FM_TBL_VL_HIGH_ARB:
11433 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11434 if (vl_arb_match_cache(vlc, t)) {
11435 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11436 break;
11437 }
11438 vl_arb_set_cache(vlc, t);
11439 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11440 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11441 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11442 break;
11443 case FM_TBL_VL_LOW_ARB:
11444 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11445 if (vl_arb_match_cache(vlc, t)) {
11446 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11447 break;
11448 }
11449 vl_arb_set_cache(vlc, t);
11450 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11451 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11452 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11453 break;
11454 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011455 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011456 break;
11457 case FM_TBL_SC2VLNT:
11458 set_sc2vlnt(ppd->dd, t);
11459 break;
11460 default:
11461 ret = -EINVAL;
11462 }
11463 return ret;
11464}
11465
11466/*
11467 * Disable all data VLs.
11468 *
11469 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11470 */
11471static int disable_data_vls(struct hfi1_devdata *dd)
11472{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011473 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011474 return 1;
11475
11476 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11477
11478 return 0;
11479}
11480
11481/*
11482 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11483 * Just re-enables all data VLs (the "fill" part happens
11484 * automatically - the name was chosen for symmetry with
11485 * stop_drain_data_vls()).
11486 *
11487 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11488 */
11489int open_fill_data_vls(struct hfi1_devdata *dd)
11490{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011491 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011492 return 1;
11493
11494 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11495
11496 return 0;
11497}
11498
11499/*
11500 * drain_data_vls() - assumes that disable_data_vls() has been called,
11501 * waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
11502 * engines to drop to 0.
11503 */
11504static void drain_data_vls(struct hfi1_devdata *dd)
11505{
11506 sc_wait(dd);
11507 sdma_wait(dd);
11508 pause_for_credit_return(dd);
11509}
11510
11511/*
11512 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11513 *
11514 * Use open_fill_data_vls() to resume using data VLs. This pair is
11515 * meant to be used like this:
11516 *
11517 * stop_drain_data_vls(dd);
11518 * // do things with per-VL resources
11519 * open_fill_data_vls(dd);
11520 */
11521int stop_drain_data_vls(struct hfi1_devdata *dd)
11522{
11523 int ret;
11524
11525 ret = disable_data_vls(dd);
11526 if (ret == 0)
11527 drain_data_vls(dd);
11528
11529 return ret;
11530}
11531
11532/*
11533 * Convert a nanosecond time to a cclock count. No matter how slow
11534 * the cclock, a non-zero ns will always have a non-zero result.
11535 */
11536u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11537{
11538 u32 cclocks;
11539
11540 if (dd->icode == ICODE_FPGA_EMULATION)
11541 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11542 else /* simulation pretends to be ASIC */
11543 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11544 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11545 cclocks = 1;
11546 return cclocks;
11547}
11548
11549/*
11550 * Convert a cclock count to nanoseconds. No matter how slow
11551 * the cclock, a non-zero cclocks will always have a non-zero result.
11552 */
11553u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11554{
11555 u32 ns;
11556
11557 if (dd->icode == ICODE_FPGA_EMULATION)
11558 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11559 else /* simulation pretends to be ASIC */
11560 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11561 if (cclocks && !ns)
11562 ns = 1;
11563 return ns;
11564}
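/*
 * Worked example for the two conversions above (the 1250 ps period is
 * illustrative only, not the actual cclock rate): with a cclock period
 * of 1250 ps, ns_to_cclock(dd, 100) = (100 * 1000) / 1250 = 80 and
 * cclock_to_ns(dd, 80) = (80 * 1250) / 1000 = 100.  A request of 1 ns
 * would compute to 0 and be bumped to the minimum of 1 cclock.
 */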
11565
11566/*
11567 * Dynamically adjust the receive interrupt timeout for a context based on
11568 * incoming packet rate.
11569 *
11570 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11571 */
11572static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11573{
11574 struct hfi1_devdata *dd = rcd->dd;
11575 u32 timeout = rcd->rcvavail_timeout;
11576
11577 /*
11578 * This algorithm doubles or halves the timeout depending on whether
11579 * the number of packets received in this interrupt was less than or
11580 * greater than or equal to the interrupt count.
11581 *
11582 * The calculations below do not allow a steady state to be achieved.
11583 * Only at the endpoints is it possible to have an unchanging
11584 * timeout.
11585 */
11586 if (npkts < rcv_intr_count) {
11587 /*
11588 * Not enough packets arrived before the timeout, adjust
11589 * timeout downward.
11590 */
11591 if (timeout < 2) /* already at minimum? */
11592 return;
11593 timeout >>= 1;
11594 } else {
11595 /*
11596 * More than enough packets arrived before the timeout, adjust
11597 * timeout upward.
11598 */
11599 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11600 return;
11601 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11602 }
11603
11604 rcd->rcvavail_timeout = timeout;
Jubin John4d114fd2016-02-14 20:21:43 -080011605 /*
11606 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11607 * been verified to be in range
11608 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011609 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011610 (u64)timeout <<
11611 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011612}
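/*
 * Example of the doubling/halving above (hypothetical numbers): with
 * rcv_intr_count == 16 and a current timeout of 64, an interrupt that
 * delivered only 5 packets halves the timeout to 32, while one that
 * delivered 20 packets doubles it to 128, capped at
 * dd->rcv_intr_timeout_csr.
 */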
11613
11614void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11615 u32 intr_adjust, u32 npkts)
11616{
11617 struct hfi1_devdata *dd = rcd->dd;
11618 u64 reg;
11619 u32 ctxt = rcd->ctxt;
11620
11621 /*
11622 * Need to write timeout register before updating RcvHdrHead to ensure
11623 * that a new value is used when the HW decides to restart counting.
11624 */
11625 if (intr_adjust)
11626 adjust_rcv_timeout(rcd, npkts);
11627 if (updegr) {
11628 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11629 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11630 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11631 }
11632 mmiowb();
11633 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11634 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11635 << RCV_HDR_HEAD_HEAD_SHIFT);
11636 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11637 mmiowb();
11638}
11639
11640u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11641{
11642 u32 head, tail;
11643
11644 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11645 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11646
11647 if (rcd->rcvhdrtail_kvaddr)
11648 tail = get_rcvhdrtail(rcd);
11649 else
11650 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11651
11652 return head == tail;
11653}
11654
11655/*
11656 * Context Control and Receive Array encoding for buffer size:
11657 * 0x0 invalid
11658 * 0x1 4 KB
11659 * 0x2 8 KB
11660 * 0x3 16 KB
11661 * 0x4 32 KB
11662 * 0x5 64 KB
11663 * 0x6 128 KB
11664 * 0x7 256 KB
11665 * 0x8 512 KB (Receive Array only)
11666 * 0x9 1 MB (Receive Array only)
11667 * 0xa 2 MB (Receive Array only)
11668 *
11669 * 0xB-0xF - reserved (Receive Array only)
11670 *
11671 *
11672 * This routine assumes that the value has already been sanity checked.
11673 */
11674static u32 encoded_size(u32 size)
11675{
11676 switch (size) {
Jubin John8638b772016-02-14 20:19:24 -080011677 case 4 * 1024: return 0x1;
11678 case 8 * 1024: return 0x2;
11679 case 16 * 1024: return 0x3;
11680 case 32 * 1024: return 0x4;
11681 case 64 * 1024: return 0x5;
11682 case 128 * 1024: return 0x6;
11683 case 256 * 1024: return 0x7;
11684 case 512 * 1024: return 0x8;
11685 case 1 * 1024 * 1024: return 0x9;
11686 case 2 * 1024 * 1024: return 0xa;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011687 }
11688 return 0x1; /* if invalid, go with the minimum size */
11689}
11690
11691void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11692{
11693 struct hfi1_ctxtdata *rcd;
11694 u64 rcvctrl, reg;
11695 int did_enable = 0;
11696
11697 rcd = dd->rcd[ctxt];
11698 if (!rcd)
11699 return;
11700
11701 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11702
11703 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11704 /* if the context is already enabled, don't do the extra steps */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011705 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11706 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011707 /* reset the tail and hdr addresses, and sequence count */
11708 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011709 rcd->rcvhdrq_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011710 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11711 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011712 rcd->rcvhdrqtailaddr_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011713 rcd->seq_cnt = 1;
11714
11715 /* reset the cached receive header queue head value */
11716 rcd->head = 0;
11717
11718 /*
11719 * Zero the receive header queue so we don't get false
11720 * positives when checking the sequence number. The
11721 * sequence numbers could land exactly on the same spot.
11722 * E.g. an rcd restart before the receive header queue wrapped.
11723 */
11724 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11725
11726 /* starting timeout */
11727 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11728
11729 /* enable the context */
11730 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11731
11732 /* clean the egr buffer size first */
11733 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11734 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11735 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11736 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11737
11738 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11739 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11740 did_enable = 1;
11741
11742 /* zero RcvEgrIndexHead */
11743 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11744
11745 /* set eager count and base index */
11746 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11747 & RCV_EGR_CTRL_EGR_CNT_MASK)
11748 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11749 (((rcd->eager_base >> RCV_SHIFT)
11750 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11751 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11752 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11753
11754 /*
11755 * Set TID (expected) count and base index.
11756 * rcd->expected_count is set to individual RcvArray entries,
11757 * not pairs, and the CSR takes a pair-count in groups of
11758 * four, so divide by 8.
11759 */
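		/*
		 * Example (assuming RCV_SHIFT is 3): an expected_count of
		 * 2048 RcvArray entries is 1024 pairs, written to the CSR
		 * as 2048 / 8 = 256 groups of four pairs.
		 */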
11760 reg = (((rcd->expected_count >> RCV_SHIFT)
11761 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11762 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11763 (((rcd->expected_base >> RCV_SHIFT)
11764 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11765 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11766 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011767 if (ctxt == HFI1_CTRL_CTXT)
11768 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011769 }
11770 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11771 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011772 /*
11773 * When the receive context is being disabled, turn on tail
11774 * update with a dummy tail address and then disable
11775 * the receive context.
11776 */
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011777 if (dd->rcvhdrtail_dummy_dma) {
Mark F. Brown46b010d2015-11-09 19:18:20 -050011778 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011779 dd->rcvhdrtail_dummy_dma);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011780 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011781 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11782 }
11783
Mike Marciniszyn77241052015-07-30 15:17:43 -040011784 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11785 }
11786 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11787 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11788 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11789 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011790 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011791 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011792 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11793 /* See comment on RcvCtxtCtrl.TailUpd above */
11794 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11795 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11796 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011797 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11798 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11799 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11800 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11801 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011802 /*
11803 * In one-packet-per-eager mode, the size comes from
11804 * the RcvArray entry.
11805 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011806 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11807 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11808 }
11809 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11810 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11811 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11812 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11813 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11814 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11815 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11816 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11817 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11818 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11819 rcd->rcvctrl = rcvctrl;
11820 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11821 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11822
11823 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011824 if (did_enable &&
11825 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011826 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11827 if (reg != 0) {
11828 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011829 ctxt, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011830 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11831 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11832 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11833 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11834 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11835 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011836 ctxt, reg, reg == 0 ? "not" : "still");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011837 }
11838 }
11839
11840 if (did_enable) {
11841 /*
11842 * The interrupt timeout and count must be set after
11843 * the context is enabled to take effect.
11844 */
11845 /* set interrupt timeout */
11846 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011847 (u64)rcd->rcvavail_timeout <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040011848 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11849
11850 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11851 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11852 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11853 }
11854
11855 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11856 /*
11857 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011858 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11859 * so it doesn't contain an address that is invalid.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011860 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011861 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011862 dd->rcvhdrtail_dummy_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011863}
11864
Dean Luick582e05c2016-02-18 11:13:01 -080011865u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011866{
11867 int ret;
11868 u64 val = 0;
11869
11870 if (namep) {
11871 ret = dd->cntrnameslen;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011872 *namep = dd->cntrnames;
11873 } else {
11874 const struct cntr_entry *entry;
11875 int i, j;
11876
11877 ret = (dd->ndevcntrs) * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011878
11879 /* Get the start of the block of counters */
11880 *cntrp = dd->cntrs;
11881
11882 /*
11883 * Now go and fill in each counter in the block.
11884 */
11885 for (i = 0; i < DEV_CNTR_LAST; i++) {
11886 entry = &dev_cntrs[i];
11887 hfi1_cdbg(CNTR, "reading %s", entry->name);
11888 if (entry->flags & CNTR_DISABLED) {
11889 /* Nothing */
11890 hfi1_cdbg(CNTR, "\tDisabled\n");
11891 } else {
11892 if (entry->flags & CNTR_VL) {
11893 hfi1_cdbg(CNTR, "\tPer VL\n");
11894 for (j = 0; j < C_VL_COUNT; j++) {
11895 val = entry->rw_cntr(entry,
11896 dd, j,
11897 CNTR_MODE_R,
11898 0);
11899 hfi1_cdbg(
11900 CNTR,
11901 "\t\tRead 0x%llx for %d\n",
11902 val, j);
11903 dd->cntrs[entry->offset + j] =
11904 val;
11905 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011906 } else if (entry->flags & CNTR_SDMA) {
11907 hfi1_cdbg(CNTR,
11908 "\t Per SDMA Engine\n");
11909 for (j = 0; j < dd->chip_sdma_engines;
11910 j++) {
11911 val =
11912 entry->rw_cntr(entry, dd, j,
11913 CNTR_MODE_R, 0);
11914 hfi1_cdbg(CNTR,
11915 "\t\tRead 0x%llx for %d\n",
11916 val, j);
11917 dd->cntrs[entry->offset + j] =
11918 val;
11919 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011920 } else {
11921 val = entry->rw_cntr(entry, dd,
11922 CNTR_INVALID_VL,
11923 CNTR_MODE_R, 0);
11924 dd->cntrs[entry->offset] = val;
11925 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11926 }
11927 }
11928 }
11929 }
11930 return ret;
11931}
11932
11933/*
11934 * Used by sysfs to create files for hfi stats to read
11935 */
Dean Luick582e05c2016-02-18 11:13:01 -080011936u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011937{
11938 int ret;
11939 u64 val = 0;
11940
11941 if (namep) {
Dean Luick582e05c2016-02-18 11:13:01 -080011942 ret = ppd->dd->portcntrnameslen;
11943 *namep = ppd->dd->portcntrnames;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011944 } else {
11945 const struct cntr_entry *entry;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011946 int i, j;
11947
Dean Luick582e05c2016-02-18 11:13:01 -080011948 ret = ppd->dd->nportcntrs * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011949 *cntrp = ppd->cntrs;
11950
11951 for (i = 0; i < PORT_CNTR_LAST; i++) {
11952 entry = &port_cntrs[i];
11953 hfi1_cdbg(CNTR, "reading %s", entry->name);
11954 if (entry->flags & CNTR_DISABLED) {
11955 /* Nothing */
11956 hfi1_cdbg(CNTR, "\tDisabled\n");
11957 continue;
11958 }
11959
11960 if (entry->flags & CNTR_VL) {
11961 hfi1_cdbg(CNTR, "\tPer VL");
11962 for (j = 0; j < C_VL_COUNT; j++) {
11963 val = entry->rw_cntr(entry, ppd, j,
11964 CNTR_MODE_R,
11965 0);
11966 hfi1_cdbg(
11967 CNTR,
11968 "\t\tRead 0x%llx for %d",
11969 val, j);
11970 ppd->cntrs[entry->offset + j] = val;
11971 }
11972 } else {
11973 val = entry->rw_cntr(entry, ppd,
11974 CNTR_INVALID_VL,
11975 CNTR_MODE_R,
11976 0);
11977 ppd->cntrs[entry->offset] = val;
11978 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11979 }
11980 }
11981 }
11982 return ret;
11983}
11984
11985static void free_cntrs(struct hfi1_devdata *dd)
11986{
11987 struct hfi1_pportdata *ppd;
11988 int i;
11989
11990 if (dd->synth_stats_timer.data)
11991 del_timer_sync(&dd->synth_stats_timer);
11992 dd->synth_stats_timer.data = 0;
11993 ppd = (struct hfi1_pportdata *)(dd + 1);
11994 for (i = 0; i < dd->num_pports; i++, ppd++) {
11995 kfree(ppd->cntrs);
11996 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011997 free_percpu(ppd->ibport_data.rvp.rc_acks);
11998 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11999 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012000 ppd->cntrs = NULL;
12001 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080012002 ppd->ibport_data.rvp.rc_acks = NULL;
12003 ppd->ibport_data.rvp.rc_qacks = NULL;
12004 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012005 }
12006 kfree(dd->portcntrnames);
12007 dd->portcntrnames = NULL;
12008 kfree(dd->cntrs);
12009 dd->cntrs = NULL;
12010 kfree(dd->scntrs);
12011 dd->scntrs = NULL;
12012 kfree(dd->cntrnames);
12013 dd->cntrnames = NULL;
Tadeusz Struk22546b72017-04-28 10:40:02 -070012014 if (dd->update_cntr_wq) {
12015 destroy_workqueue(dd->update_cntr_wq);
12016 dd->update_cntr_wq = NULL;
12017 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012018}
12019
Mike Marciniszyn77241052015-07-30 15:17:43 -040012020static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12021 u64 *psval, void *context, int vl)
12022{
12023 u64 val;
12024 u64 sval = *psval;
12025
12026 if (entry->flags & CNTR_DISABLED) {
12027 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12028 return 0;
12029 }
12030
12031 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12032
12033 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12034
12035 /* If it's a synthetic counter there is more work we need to do */
12036 if (entry->flags & CNTR_SYNTH) {
12037 if (sval == CNTR_MAX) {
12038 /* No need to read already saturated */
12039 return CNTR_MAX;
12040 }
12041
12042 if (entry->flags & CNTR_32BIT) {
12043 /* 32bit counters can wrap multiple times */
12044 u64 upper = sval >> 32;
12045 u64 lower = (sval << 32) >> 32;
12046
12047 if (lower > val) { /* hw wrapped */
12048 if (upper == CNTR_32BIT_MAX)
12049 val = CNTR_MAX;
12050 else
12051 upper++;
12052 }
12053
12054 if (val != CNTR_MAX)
12055 val = (upper << 32) | val;
12056
12057 } else {
12058 /* If we rolled we are saturated */
12059 if ((val < sval) || (val > CNTR_MAX))
12060 val = CNTR_MAX;
12061 }
12062 }
12063
12064 *psval = val;
12065
12066 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12067
12068 return val;
12069}
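/*
 * Example of the 32-bit wrap handling above (made-up values): if the
 * saved synthetic value is 0x1ffffff00 (upper = 1, lower = 0xffffff00)
 * and the hardware counter now reads 0x10, then lower > val indicates
 * a wrap, upper is bumped to 2, and the returned value becomes
 * 0x200000010.
 */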
12070
12071static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12072 struct cntr_entry *entry,
12073 u64 *psval, void *context, int vl, u64 data)
12074{
12075 u64 val;
12076
12077 if (entry->flags & CNTR_DISABLED) {
12078 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12079 return 0;
12080 }
12081
12082 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12083
12084 if (entry->flags & CNTR_SYNTH) {
12085 *psval = data;
12086 if (entry->flags & CNTR_32BIT) {
12087 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12088 (data << 32) >> 32);
12089 val = data; /* return the full 64bit value */
12090 } else {
12091 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12092 data);
12093 }
12094 } else {
12095 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12096 }
12097
12098 *psval = val;
12099
12100 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12101
12102 return val;
12103}
12104
12105u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12106{
12107 struct cntr_entry *entry;
12108 u64 *sval;
12109
12110 entry = &dev_cntrs[index];
12111 sval = dd->scntrs + entry->offset;
12112
12113 if (vl != CNTR_INVALID_VL)
12114 sval += vl;
12115
12116 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12117}
12118
12119u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12120{
12121 struct cntr_entry *entry;
12122 u64 *sval;
12123
12124 entry = &dev_cntrs[index];
12125 sval = dd->scntrs + entry->offset;
12126
12127 if (vl != CNTR_INVALID_VL)
12128 sval += vl;
12129
12130 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12131}
12132
12133u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12134{
12135 struct cntr_entry *entry;
12136 u64 *sval;
12137
12138 entry = &port_cntrs[index];
12139 sval = ppd->scntrs + entry->offset;
12140
12141 if (vl != CNTR_INVALID_VL)
12142 sval += vl;
12143
12144 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12145 (index <= C_RCV_HDR_OVF_LAST)) {
12146 /* We do not want to bother for disabled contexts */
12147 return 0;
12148 }
12149
12150 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12151}
12152
12153u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12154{
12155 struct cntr_entry *entry;
12156 u64 *sval;
12157
12158 entry = &port_cntrs[index];
12159 sval = ppd->scntrs + entry->offset;
12160
12161 if (vl != CNTR_INVALID_VL)
12162 sval += vl;
12163
12164 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12165 (index <= C_RCV_HDR_OVF_LAST)) {
12166 /* We do not want to bother for disabled contexts */
12167 return 0;
12168 }
12169
12170 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12171}
12172
Tadeusz Struk22546b72017-04-28 10:40:02 -070012173static void do_update_synth_timer(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012174{
12175 u64 cur_tx;
12176 u64 cur_rx;
12177 u64 total_flits;
12178 u8 update = 0;
12179 int i, j, vl;
12180 struct hfi1_pportdata *ppd;
12181 struct cntr_entry *entry;
Tadeusz Struk22546b72017-04-28 10:40:02 -070012182 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12183 update_cntr_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012184
12185 /*
12186	 * Rather than keep beating on the CSRs, pick a minimal set that we can
12187	 * check to watch for potential rollover. We can do this by looking at
12188	 * the number of flits sent/received. If the total flits exceed 32 bits
12189	 * then we have to iterate over all the counters and update them.
12190 */
12191 entry = &dev_cntrs[C_DC_RCV_FLITS];
12192 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12193
12194 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12195 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12196
12197 hfi1_cdbg(
12198 CNTR,
12199 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12200 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12201
12202 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12203 /*
12204 * May not be strictly necessary to update but it won't hurt and
12205 * simplifies the logic here.
12206 */
12207 update = 1;
12208 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12209 dd->unit);
12210 } else {
12211 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12212 hfi1_cdbg(CNTR,
12213 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12214 total_flits, (u64)CNTR_32BIT_MAX);
12215 if (total_flits >= CNTR_32BIT_MAX) {
12216 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12217 dd->unit);
12218 update = 1;
12219 }
12220 }
12221
12222 if (update) {
12223 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12224 for (i = 0; i < DEV_CNTR_LAST; i++) {
12225 entry = &dev_cntrs[i];
12226 if (entry->flags & CNTR_VL) {
12227 for (vl = 0; vl < C_VL_COUNT; vl++)
12228 read_dev_cntr(dd, i, vl);
12229 } else {
12230 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12231 }
12232 }
12233 ppd = (struct hfi1_pportdata *)(dd + 1);
12234 for (i = 0; i < dd->num_pports; i++, ppd++) {
12235 for (j = 0; j < PORT_CNTR_LAST; j++) {
12236 entry = &port_cntrs[j];
12237 if (entry->flags & CNTR_VL) {
12238 for (vl = 0; vl < C_VL_COUNT; vl++)
12239 read_port_cntr(ppd, j, vl);
12240 } else {
12241 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12242 }
12243 }
12244 }
12245
12246 /*
12247 * We want the value in the register. The goal is to keep track
12248 * of the number of "ticks" not the counter value. In other
12249 * words if the register rolls we want to notice it and go ahead
12250 * and force an update.
12251 */
12252 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12253 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12254 CNTR_MODE_R, 0);
12255
12256 entry = &dev_cntrs[C_DC_RCV_FLITS];
12257 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12258 CNTR_MODE_R, 0);
12259
12260 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12261 dd->unit, dd->last_tx, dd->last_rx);
12262
12263 } else {
12264 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12265 }
Tadeusz Struk22546b72017-04-28 10:40:02 -070012266}
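/*
 * Illustrative sketch, not part of the driver: the "tripwire" decision
 * made by do_update_synth_timer() above.  A full sweep of all counters
 * is forced when either tripwire counter itself went backwards or the
 * flit traffic since the last pass could have wrapped a 32-bit counter.
 * The helper name is made up; CNTR_32BIT_MAX is the limit used above.
 */
static inline bool cntr_update_needed_sketch(u64 cur_tx, u64 cur_rx,
					     u64 last_tx, u64 last_rx)
{
	if (cur_tx < last_tx || cur_rx < last_rx)
		return true;	/* a tripwire counter rolled */

	/* total flits moved since the last pass */
	return (cur_tx - last_tx) + (cur_rx - last_rx) >= CNTR_32BIT_MAX;
}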
Mike Marciniszyn77241052015-07-30 15:17:43 -040012267
Tadeusz Struk22546b72017-04-28 10:40:02 -070012268static void update_synth_timer(unsigned long opaque)
12269{
12270 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12271
12272 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
Bart Van Assche48a0cc132016-06-03 12:09:56 -070012273 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012274}
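/*
 * Illustrative sketch, not part of the driver: the shape of the
 * timer/workqueue split used above - the timer callback only queues
 * work and re-arms itself, while the slow CSR sweep runs from the
 * ordered workqueue in process context.  All names below are made up,
 * and the callback uses the same pre-timer_setup() signature that
 * update_synth_timer() uses in this file.
 */
struct sketch_dev {
	struct timer_list timer;
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void sketch_work_fn(struct work_struct *work)
{
	/* the heavy counter reads would run here, in process context */
}

static void sketch_timer_fn(unsigned long opaque)
{
	struct sketch_dev *sdev = (struct sketch_dev *)opaque;

	queue_work(sdev->wq, &sdev->work);	/* defer the heavy work */
	mod_timer(&sdev->timer, jiffies + HZ);	/* re-arm one second out */
}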
12275
Jianxin Xiong09a79082016-10-25 13:12:40 -070012276#define C_MAX_NAME 16 /* 15 chars + one for \0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012277static int init_cntrs(struct hfi1_devdata *dd)
12278{
Dean Luickc024c552016-01-11 18:30:57 -050012279 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012280 size_t sz;
12281 char *p;
12282 char name[C_MAX_NAME];
12283 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012284 const char *bit_type_32 = ",32";
12285 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012286
12287 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053012288 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12289 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012290
12291 /***********************/
12292 /* per device counters */
12293 /***********************/
12294
12295	/* size names and determine how many we have */
12296 dd->ndevcntrs = 0;
12297 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012298
12299 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012300 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12301 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12302 continue;
12303 }
12304
12305 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050012306 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012307 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012308 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012309 dev_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012310 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012311 /* Add ",32" for 32-bit counters */
12312 if (dev_cntrs[i].flags & CNTR_32BIT)
12313 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012314 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012315 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012316 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012317 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050012318 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012319 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012320 snprintf(name, C_MAX_NAME, "%s%d",
12321 dev_cntrs[i].name, j);
12322 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012323 /* Add ",32" for 32-bit counters */
12324 if (dev_cntrs[i].flags & CNTR_32BIT)
12325 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012326 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012327 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012328 }
12329 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012330 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012331 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012332 /* Add ",32" for 32-bit counters */
12333 if (dev_cntrs[i].flags & CNTR_32BIT)
12334 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050012335 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012336 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012337 }
12338 }
12339
12340 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050012341 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012342 if (!dd->cntrs)
12343 goto bail;
12344
Dean Luickc024c552016-01-11 18:30:57 -050012345 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012346 if (!dd->scntrs)
12347 goto bail;
12348
Mike Marciniszyn77241052015-07-30 15:17:43 -040012349 /* allocate space for the counter names */
12350 dd->cntrnameslen = sz;
12351 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12352 if (!dd->cntrnames)
12353 goto bail;
12354
12355 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050012356 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012357 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12358 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012359 } else if (dev_cntrs[i].flags & CNTR_VL) {
12360 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012361 snprintf(name, C_MAX_NAME, "%s%d",
12362 dev_cntrs[i].name,
12363 vl_from_idx(j));
12364 memcpy(p, name, strlen(name));
12365 p += strlen(name);
12366
12367 /* Counter is 32 bits */
12368 if (dev_cntrs[i].flags & CNTR_32BIT) {
12369 memcpy(p, bit_type_32, bit_type_32_sz);
12370 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012371 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012372
Mike Marciniszyn77241052015-07-30 15:17:43 -040012373 *p++ = '\n';
12374 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012375 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12376 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012377 snprintf(name, C_MAX_NAME, "%s%d",
12378 dev_cntrs[i].name, j);
12379 memcpy(p, name, strlen(name));
12380 p += strlen(name);
12381
12382 /* Counter is 32 bits */
12383 if (dev_cntrs[i].flags & CNTR_32BIT) {
12384 memcpy(p, bit_type_32, bit_type_32_sz);
12385 p += bit_type_32_sz;
12386 }
12387
12388 *p++ = '\n';
12389 }
12390 } else {
12391 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12392 p += strlen(dev_cntrs[i].name);
12393
12394 /* Counter is 32 bits */
12395 if (dev_cntrs[i].flags & CNTR_32BIT) {
12396 memcpy(p, bit_type_32, bit_type_32_sz);
12397 p += bit_type_32_sz;
12398 }
12399
12400 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040012401 }
12402 }
12403
12404 /*********************/
12405 /* per port counters */
12406 /*********************/
12407
12408 /*
12409 * Go through the counters for the overflows and disable the ones we
12410 * don't need. This varies based on platform so we need to do it
12411 * dynamically here.
12412 */
12413 rcv_ctxts = dd->num_rcv_contexts;
12414 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12415 i <= C_RCV_HDR_OVF_LAST; i++) {
12416 port_cntrs[i].flags |= CNTR_DISABLED;
12417 }
12418
12419	/* size port counter names and determine how many we have */
12420 sz = 0;
12421 dd->nportcntrs = 0;
12422 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012423 if (port_cntrs[i].flags & CNTR_DISABLED) {
12424 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12425 continue;
12426 }
12427
12428 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012429 port_cntrs[i].offset = dd->nportcntrs;
12430 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012431 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012432 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012433 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012434 /* Add ",32" for 32-bit counters */
12435 if (port_cntrs[i].flags & CNTR_32BIT)
12436 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012437 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012438 dd->nportcntrs++;
12439 }
12440 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012441 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012442 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012443 /* Add ",32" for 32-bit counters */
12444 if (port_cntrs[i].flags & CNTR_32BIT)
12445 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012446 port_cntrs[i].offset = dd->nportcntrs;
12447 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012448 }
12449 }
12450
12451 /* allocate space for the counter names */
12452 dd->portcntrnameslen = sz;
12453 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12454 if (!dd->portcntrnames)
12455 goto bail;
12456
12457 /* fill in port cntr names */
12458 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12459 if (port_cntrs[i].flags & CNTR_DISABLED)
12460 continue;
12461
12462 if (port_cntrs[i].flags & CNTR_VL) {
12463 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012464 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012465 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012466 memcpy(p, name, strlen(name));
12467 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012468
12469 /* Counter is 32 bits */
12470 if (port_cntrs[i].flags & CNTR_32BIT) {
12471 memcpy(p, bit_type_32, bit_type_32_sz);
12472 p += bit_type_32_sz;
12473 }
12474
Mike Marciniszyn77241052015-07-30 15:17:43 -040012475 *p++ = '\n';
12476 }
12477 } else {
12478 memcpy(p, port_cntrs[i].name,
12479 strlen(port_cntrs[i].name));
12480 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012481
12482 /* Counter is 32 bits */
12483 if (port_cntrs[i].flags & CNTR_32BIT) {
12484 memcpy(p, bit_type_32, bit_type_32_sz);
12485 p += bit_type_32_sz;
12486 }
12487
Mike Marciniszyn77241052015-07-30 15:17:43 -040012488 *p++ = '\n';
12489 }
12490 }
12491
12492 /* allocate per port storage for counter values */
12493 ppd = (struct hfi1_pportdata *)(dd + 1);
12494 for (i = 0; i < dd->num_pports; i++, ppd++) {
12495 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12496 if (!ppd->cntrs)
12497 goto bail;
12498
12499 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12500 if (!ppd->scntrs)
12501 goto bail;
12502 }
12503
12504 /* CPU counters need to be allocated and zeroed */
12505 if (init_cpu_counters(dd))
12506 goto bail;
12507
Tadeusz Struk22546b72017-04-28 10:40:02 -070012508 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12509 WQ_MEM_RECLAIM, dd->unit);
12510 if (!dd->update_cntr_wq)
12511 goto bail;
12512
12513 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12514
Mike Marciniszyn77241052015-07-30 15:17:43 -040012515 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12516 return 0;
12517bail:
12518 free_cntrs(dd);
12519 return -ENOMEM;
12520}
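/*
 * Illustrative sketch, not part of the driver: how init_cntrs() above
 * sizes one entry of the flat names buffer - the printed counter name,
 * an optional ",32" tag for 32-bit counters, and a trailing newline.
 * The helper name is made up; CNTR_32BIT matches the flag used above.
 */
static inline size_t cntr_name_entry_size_sketch(const char *name, u64 flags)
{
	size_t sz = strlen(name);	/* counter (or per-VL/engine) name */

	if (flags & CNTR_32BIT)
		sz += strlen(",32");	/* 32-bit annotation */
	return sz + 1;			/* trailing '\n' */
}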
12521
Mike Marciniszyn77241052015-07-30 15:17:43 -040012522static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12523{
12524 switch (chip_lstate) {
12525 default:
12526 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012527 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12528 chip_lstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012529 /* fall through */
12530 case LSTATE_DOWN:
12531 return IB_PORT_DOWN;
12532 case LSTATE_INIT:
12533 return IB_PORT_INIT;
12534 case LSTATE_ARMED:
12535 return IB_PORT_ARMED;
12536 case LSTATE_ACTIVE:
12537 return IB_PORT_ACTIVE;
12538 }
12539}
12540
12541u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12542{
12543 /* look at the HFI meta-states only */
12544 switch (chip_pstate & 0xf0) {
12545 default:
12546 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012547 chip_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012548 /* fall through */
12549 case PLS_DISABLED:
12550 return IB_PORTPHYSSTATE_DISABLED;
12551 case PLS_OFFLINE:
12552 return OPA_PORTPHYSSTATE_OFFLINE;
12553 case PLS_POLLING:
12554 return IB_PORTPHYSSTATE_POLLING;
12555 case PLS_CONFIGPHY:
12556 return IB_PORTPHYSSTATE_TRAINING;
12557 case PLS_LINKUP:
12558 return IB_PORTPHYSSTATE_LINKUP;
12559 case PLS_PHYTEST:
12560 return IB_PORTPHYSSTATE_PHY_TEST;
12561 }
12562}
12563
12564/* return the OPA port logical state name */
12565const char *opa_lstate_name(u32 lstate)
12566{
12567 static const char * const port_logical_names[] = {
12568 "PORT_NOP",
12569 "PORT_DOWN",
12570 "PORT_INIT",
12571 "PORT_ARMED",
12572 "PORT_ACTIVE",
12573 "PORT_ACTIVE_DEFER",
12574 };
12575 if (lstate < ARRAY_SIZE(port_logical_names))
12576 return port_logical_names[lstate];
12577 return "unknown";
12578}
12579
12580/* return the OPA port physical state name */
12581const char *opa_pstate_name(u32 pstate)
12582{
12583 static const char * const port_physical_names[] = {
12584 "PHYS_NOP",
12585 "reserved1",
12586 "PHYS_POLL",
12587 "PHYS_DISABLED",
12588 "PHYS_TRAINING",
12589 "PHYS_LINKUP",
12590 "PHYS_LINK_ERR_RECOVER",
12591 "PHYS_PHY_TEST",
12592 "reserved8",
12593 "PHYS_OFFLINE",
12594 "PHYS_GANGED",
12595 "PHYS_TEST",
12596 };
12597 if (pstate < ARRAY_SIZE(port_physical_names))
12598 return port_physical_names[pstate];
12599 return "unknown";
12600}
12601
12602/*
12603 * Read the hardware link state and set the driver's cached value of it.
12604 * Return the (new) current value.
12605 */
12606u32 get_logical_state(struct hfi1_pportdata *ppd)
12607{
12608 u32 new_state;
12609
12610 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12611 if (new_state != ppd->lstate) {
12612 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012613 opa_lstate_name(new_state), new_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012614 ppd->lstate = new_state;
12615 }
12616 /*
12617 * Set port status flags in the page mapped into userspace
12618 * memory. Do it here to ensure a reliable state - this is
12619 * the only function called by all state handling code.
12620	 * Always set the flags because the cache value
12621 * might have been changed explicitly outside of this
12622 * function.
12623 */
12624 if (ppd->statusp) {
12625 switch (ppd->lstate) {
12626 case IB_PORT_DOWN:
12627 case IB_PORT_INIT:
12628 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12629 HFI1_STATUS_IB_READY);
12630 break;
12631 case IB_PORT_ARMED:
12632 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12633 break;
12634 case IB_PORT_ACTIVE:
12635 *ppd->statusp |= HFI1_STATUS_IB_READY;
12636 break;
12637 }
12638 }
12639 return ppd->lstate;
12640}
12641
12642/**
12643 * wait_logical_linkstate - wait for an IB link state change to occur
12644 * @ppd: port device
12645 * @state: the state to wait for
12646 * @msecs: the number of milliseconds to wait
12647 *
12648 * Wait up to msecs milliseconds for IB link state change to occur.
12649 * For now, take the easy polling route.
12650 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12651 */
12652static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12653 int msecs)
12654{
12655 unsigned long timeout;
12656
12657 timeout = jiffies + msecs_to_jiffies(msecs);
12658 while (1) {
12659 if (get_logical_state(ppd) == state)
12660 return 0;
12661 if (time_after(jiffies, timeout))
12662 break;
12663 msleep(20);
12664 }
12665 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12666
12667 return -ETIMEDOUT;
12668}
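/*
 * Illustrative sketch, not part of the driver: the bounded-poll pattern
 * used by wait_logical_linkstate() above and wait_physical_linkstate()
 * below - compute a jiffies deadline, poll the condition, and sleep
 * between samples until the deadline passes.  The helper and the
 * 'check' callback are made-up names.
 */
static int bounded_poll_sketch(struct hfi1_pportdata *ppd,
			       bool (*check)(struct hfi1_pportdata *),
			       int msecs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(msecs);

	while (!check(ppd)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(20);		/* sample roughly every 20 ms */
	}
	return 0;
}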
12669
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012670/*
12671 * Read the physical hardware link state and set the driver's cached value
12672 * of it.
12673 */
12674void cache_physical_state(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012675{
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012676 u32 read_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012677 u32 ib_pstate;
12678
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012679 read_pstate = read_physical_state(ppd->dd);
12680 ib_pstate = chip_to_opa_pstate(ppd->dd, read_pstate);
12681 /* check if OPA pstate changed */
12682 if (chip_to_opa_pstate(ppd->dd, ppd->pstate) != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012683 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012684 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12685 __func__, opa_pstate_name(ib_pstate), ib_pstate,
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012686 read_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012687 }
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012688 ppd->pstate = read_pstate;
12689}
12690
12691/*
12692 * wait_physical_linkstate - wait for a physical link state change to occur
12693 * @ppd: port device
12694 * @state: the state to wait for
12695 * @msecs: the number of milliseconds to wait
12696 *
12697 * Wait up to msecs milliseconds for physical link state change to occur.
12698 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12699 */
12700static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12701 int msecs)
12702{
12703 unsigned long timeout;
12704
12705 timeout = jiffies + msecs_to_jiffies(msecs);
12706 while (1) {
12707 cache_physical_state(ppd);
12708 if (ppd->pstate == state)
12709 break;
12710 if (time_after(jiffies, timeout)) {
12711 dd_dev_err(ppd->dd,
12712 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
12713 state, ppd->pstate);
12714 return -ETIMEDOUT;
12715 }
12716 usleep_range(1950, 2050); /* sleep 2ms-ish */
12717 }
12718
12719 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012720}
12721
Mike Marciniszyn77241052015-07-30 15:17:43 -040012722#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12723(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12724
12725#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12726(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12727
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -070012728void hfi1_init_ctxt(struct send_context *sc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012729{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012730 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012731 struct hfi1_devdata *dd = sc->dd;
12732 u64 reg;
12733 u8 set = (sc->type == SC_USER ?
12734 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12735 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12736 reg = read_kctxt_csr(dd, sc->hw_context,
12737 SEND_CTXT_CHECK_ENABLE);
12738 if (set)
12739 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12740 else
12741 SET_STATIC_RATE_CONTROL_SMASK(reg);
12742 write_kctxt_csr(dd, sc->hw_context,
12743 SEND_CTXT_CHECK_ENABLE, reg);
12744 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012745}
12746
12747int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12748{
12749 int ret = 0;
12750 u64 reg;
12751
12752 if (dd->icode != ICODE_RTL_SILICON) {
12753 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12754 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12755 __func__);
12756 return -EINVAL;
12757 }
12758 reg = read_csr(dd, ASIC_STS_THERM);
12759 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12760 ASIC_STS_THERM_CURR_TEMP_MASK);
12761 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12762 ASIC_STS_THERM_LO_TEMP_MASK);
12763 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12764 ASIC_STS_THERM_HI_TEMP_MASK);
12765 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12766 ASIC_STS_THERM_CRIT_TEMP_MASK);
12767 /* triggers is a 3-bit value - 1 bit per trigger. */
12768 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12769
12770 return ret;
12771}
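/*
 * Illustrative sketch, not part of the driver: the shift-and-mask idiom
 * hfi1_tempsense_rd() uses above to pull each temperature field out of
 * the ASIC_STS_THERM value.  The helper name is made up; a caller would
 * pass the real shift/mask pairs, e.g. ASIC_STS_THERM_CURR_TEMP_SHIFT
 * and ASIC_STS_THERM_CURR_TEMP_MASK.
 */
static inline u64 therm_field_sketch(u64 reg, unsigned int shift, u64 mask)
{
	return (reg >> shift) & mask;	/* isolate one packed field */
}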
12772
12773/* ========================================================================= */
12774
12775/*
12776 * Enable/disable chip from delivering interrupts.
12777 */
12778void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12779{
12780 int i;
12781
12782 /*
12783 * In HFI, the mask needs to be 1 to allow interrupts.
12784 */
12785 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012786 /* enable all interrupts */
12787 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012788 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012789
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012790 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012791 } else {
12792 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012793 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012794 }
12795}
12796
12797/*
12798 * Clear all interrupt sources on the chip.
12799 */
12800static void clear_all_interrupts(struct hfi1_devdata *dd)
12801{
12802 int i;
12803
12804 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012805 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012806
12807 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12808 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12809 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12810 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12811 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12812 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12813 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12814 for (i = 0; i < dd->chip_send_contexts; i++)
12815 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12816 for (i = 0; i < dd->chip_sdma_engines; i++)
12817 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12818
12819 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12820 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12821 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12822}
12823
12824/* Move to pcie.c? */
12825static void disable_intx(struct pci_dev *pdev)
12826{
12827 pci_intx(pdev, 0);
12828}
12829
12830static void clean_up_interrupts(struct hfi1_devdata *dd)
12831{
12832 int i;
12833
12834 /* remove irqs - must happen before disabling/turning off */
12835 if (dd->num_msix_entries) {
12836 /* MSI-X */
12837 struct hfi1_msix_entry *me = dd->msix_entries;
12838
12839 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012840 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012841 continue;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012842 hfi1_put_irq_affinity(dd, me);
12843 free_irq(me->irq, me->arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012844 }
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012845
12846 /* clean structures */
12847 kfree(dd->msix_entries);
12848 dd->msix_entries = NULL;
12849 dd->num_msix_entries = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012850 } else {
12851 /* INTx */
12852 if (dd->requested_intx_irq) {
12853 free_irq(dd->pcidev->irq, dd);
12854 dd->requested_intx_irq = 0;
12855 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012856 disable_intx(dd->pcidev);
12857 }
12858
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012859 pci_free_irq_vectors(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012860}
12861
12862/*
12863 * Remap the interrupt source from the general handler to the given MSI-X
12864 * interrupt.
12865 */
12866static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12867{
12868 u64 reg;
12869 int m, n;
12870
12871 /* clear from the handled mask of the general interrupt */
12872 m = isrc / 64;
12873 n = isrc % 64;
Dennis Dalessandrobc54f672017-05-29 17:18:14 -070012874 if (likely(m < CCE_NUM_INT_CSRS)) {
12875 dd->gi_mask[m] &= ~((u64)1 << n);
12876 } else {
12877 dd_dev_err(dd, "remap interrupt err\n");
12878 return;
12879 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012880
12881 /* direct the chip source to the given MSI-X interrupt */
12882 m = isrc / 8;
12883 n = isrc % 8;
Jubin John8638b772016-02-14 20:19:24 -080012884 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12885 reg &= ~((u64)0xff << (8 * n));
12886 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12887 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012888}
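/*
 * Illustrative sketch, not part of the driver: the index math used by
 * remap_intr() above.  Each 64-bit CCE_INT_MASK word covers 64 sources
 * (word = isrc / 64, bit = isrc % 64), and each 64-bit CCE_INT_MAP CSR
 * holds 8 one-byte MSI-X vector numbers (csr = isrc / 8, byte = isrc % 8).
 * The helper name is made up.
 */
static inline void remap_indices_sketch(int isrc, int *mask_word,
					int *mask_bit, int *map_csr,
					int *map_byte)
{
	*mask_word = isrc / 64;
	*mask_bit = isrc % 64;
	*map_csr = isrc / 8;
	*map_byte = isrc % 8;
}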
12889
12890static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12891 int engine, int msix_intr)
12892{
12893 /*
12894 * SDMA engine interrupt sources grouped by type, rather than
12895 * engine. Per-engine interrupts are as follows:
12896 * SDMA
12897 * SDMAProgress
12898 * SDMAIdle
12899 */
Jubin John8638b772016-02-14 20:19:24 -080012900 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012901 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012902 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012903 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012904 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012905 msix_intr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012906}
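/*
 * Illustrative sketch, not part of the driver: because the SDMA sources
 * are grouped by type rather than by engine, the chip source index for
 * a given (type, engine) pair works out to the formula below, where
 * type 0/1/2 correspond to SDMA, SDMAProgress and SDMAIdle.  The helper
 * name is made up.
 */
static inline int sdma_int_source_sketch(int type, int engine)
{
	return IS_SDMA_START + type * TXE_NUM_SDMA_ENGINES + engine;
}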
12907
Mike Marciniszyn77241052015-07-30 15:17:43 -040012908static int request_intx_irq(struct hfi1_devdata *dd)
12909{
12910 int ret;
12911
Jubin John98050712015-11-16 21:59:27 -050012912 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12913 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012914 ret = request_irq(dd->pcidev->irq, general_interrupt,
Jubin John17fb4f22016-02-14 20:21:52 -080012915 IRQF_SHARED, dd->intx_name, dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012916 if (ret)
12917 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012918 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012919 else
12920 dd->requested_intx_irq = 1;
12921 return ret;
12922}
12923
12924static int request_msix_irqs(struct hfi1_devdata *dd)
12925{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012926 int first_general, last_general;
12927 int first_sdma, last_sdma;
12928 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012929 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012930
12931 /* calculate the ranges we are going to use */
12932 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080012933 last_general = first_general + 1;
12934 first_sdma = last_general;
12935 last_sdma = first_sdma + dd->num_sdma;
12936 first_rx = last_sdma;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070012937 last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
12938
12939 /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
12940 dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012941
12942 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012943 * Sanity check - the code expects all SDMA chip source
12944 * interrupts to be in the same CSR, starting at bit 0. Verify
12945 * that this is true by checking the bit location of the start.
12946 */
12947 BUILD_BUG_ON(IS_SDMA_START % 64);
12948
12949 for (i = 0; i < dd->num_msix_entries; i++) {
12950 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12951 const char *err_info;
12952 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012953 irq_handler_t thread = NULL;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070012954 void *arg = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012955 int idx;
12956 struct hfi1_ctxtdata *rcd = NULL;
12957 struct sdma_engine *sde = NULL;
12958
12959 /* obtain the arguments to request_irq */
12960 if (first_general <= i && i < last_general) {
12961 idx = i - first_general;
12962 handler = general_interrupt;
12963 arg = dd;
12964 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012965 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012966 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012967 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012968 } else if (first_sdma <= i && i < last_sdma) {
12969 idx = i - first_sdma;
12970 sde = &dd->per_sdma[idx];
12971 handler = sdma_interrupt;
12972 arg = sde;
12973 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012974 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012975 err_info = "sdma";
12976 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012977 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012978 } else if (first_rx <= i && i < last_rx) {
12979 idx = i - first_rx;
12980 rcd = dd->rcd[idx];
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070012981 if (rcd) {
12982 /*
12983 * Set the interrupt register and mask for this
12984 * context's interrupt.
12985 */
12986 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12987 rcd->imask = ((u64)1) <<
12988 ((IS_RCVAVAIL_START + idx) % 64);
12989 handler = receive_context_interrupt;
12990 thread = receive_context_thread;
12991 arg = rcd;
12992 snprintf(me->name, sizeof(me->name),
12993 DRIVER_NAME "_%d kctxt%d",
12994 dd->unit, idx);
12995 err_info = "receive context";
12996 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12997 me->type = IRQ_RCVCTXT;
12998 rcd->msix_intr = i;
12999 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013000 } else {
13001 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080013002 * ignore it
13003 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013004 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013005 "Unexpected extra MSI-X interrupt %d\n", i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013006 continue;
13007 }
13008 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080013009 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013010 continue;
13011 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080013012 me->name[sizeof(me->name) - 1] = 0;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013013 me->irq = pci_irq_vector(dd->pcidev, i);
13014 /*
13015 * On err return me->irq. Don't need to clear this
13016 * because 'arg' has not been set, and cleanup will
13017 * do the right thing.
13018 */
13019 if (me->irq < 0)
13020 return me->irq;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013021
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013022 ret = request_threaded_irq(me->irq, handler, thread, 0,
Jubin John17fb4f22016-02-14 20:21:52 -080013023 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013024 if (ret) {
13025 dd_dev_err(dd,
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013026 "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13027 err_info, me->irq, idx, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013028 return ret;
13029 }
13030 /*
13031 * assign arg after request_irq call, so it will be
13032 * cleaned up
13033 */
13034 me->arg = arg;
13035
Mitko Haralanov957558c2016-02-03 14:33:40 -080013036 ret = hfi1_get_irq_affinity(dd, me);
13037 if (ret)
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013038 dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013039 }
13040
Mike Marciniszyn77241052015-07-30 15:17:43 -040013041 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013042}
13043
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013044void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13045{
13046 int i;
13047
13048 if (!dd->num_msix_entries) {
13049 synchronize_irq(dd->pcidev->irq);
13050 return;
13051 }
13052
13053 for (i = 0; i < dd->vnic.num_ctxt; i++) {
13054 struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13055 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13056
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013057 synchronize_irq(me->irq);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013058 }
13059}
13060
13061void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13062{
13063 struct hfi1_devdata *dd = rcd->dd;
13064 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13065
13066 if (!me->arg) /* => no irq, no affinity */
13067 return;
13068
13069 hfi1_put_irq_affinity(dd, me);
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013070 free_irq(me->irq, me->arg);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013071
13072 me->arg = NULL;
13073}
13074
13075void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13076{
13077 struct hfi1_devdata *dd = rcd->dd;
13078 struct hfi1_msix_entry *me;
13079 int idx = rcd->ctxt;
13080 void *arg = rcd;
13081 int ret;
13082
13083 rcd->msix_intr = dd->vnic.msix_idx++;
13084 me = &dd->msix_entries[rcd->msix_intr];
13085
13086 /*
13087 * Set the interrupt register and mask for this
13088 * context's interrupt.
13089 */
13090 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13091 rcd->imask = ((u64)1) <<
13092 ((IS_RCVAVAIL_START + idx) % 64);
13093
13094 snprintf(me->name, sizeof(me->name),
13095 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13096 me->name[sizeof(me->name) - 1] = 0;
13097 me->type = IRQ_RCVCTXT;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013098 me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13099 if (me->irq < 0) {
13100 dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
13101 idx, me->irq);
13102 return;
13103 }
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013104 remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13105
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013106 ret = request_threaded_irq(me->irq, receive_context_interrupt,
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013107 receive_context_thread, 0, me->name, arg);
13108 if (ret) {
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013109 dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13110 me->irq, idx, ret);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013111 return;
13112 }
13113 /*
13114 * assign arg after request_irq call, so it will be
13115 * cleaned up
13116 */
13117 me->arg = arg;
13118
13119 ret = hfi1_get_irq_affinity(dd, me);
13120 if (ret) {
13121 dd_dev_err(dd,
13122 "unable to pin IRQ %d\n", ret);
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013123 free_irq(me->irq, me->arg);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013124 }
13125}
13126
Mike Marciniszyn77241052015-07-30 15:17:43 -040013127/*
13128 * Set the general handler to accept all interrupts, remap all
13129 * chip interrupts back to MSI-X 0.
13130 */
13131static void reset_interrupts(struct hfi1_devdata *dd)
13132{
13133 int i;
13134
13135 /* all interrupts handled by the general handler */
13136 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13137 dd->gi_mask[i] = ~(u64)0;
13138
13139 /* all chip interrupts map to MSI-X 0 */
13140 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013141 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013142}
13143
13144static int set_up_interrupts(struct hfi1_devdata *dd)
13145{
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013146 u32 total;
13147 int ret, request;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013148 int single_interrupt = 0; /* we expect to have all the interrupts */
13149
13150 /*
13151 * Interrupt count:
13152 * 1 general, "slow path" interrupt (includes the SDMA engines
13153 * slow source, SDMACleanupDone)
13154 * N interrupts - one per used SDMA engine
13155	 * M interrupts - one per kernel receive context
13156 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013157 total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013158
Mike Marciniszyn77241052015-07-30 15:17:43 -040013159 /* ask for MSI-X interrupts */
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013160 request = request_msix(dd, total);
13161 if (request < 0) {
13162 ret = request;
13163 goto fail;
13164 } else if (request == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013165 /* using INTx */
13166 /* dd->num_msix_entries already zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013167 single_interrupt = 1;
13168 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013169 } else if (request < total) {
13170 /* using MSI-X, with reduced interrupts */
13171 dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
13172 total, request);
13173 ret = -EINVAL;
13174 goto fail;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013175 } else {
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013176 dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13177 GFP_KERNEL);
13178 if (!dd->msix_entries) {
13179 ret = -ENOMEM;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013180 goto fail;
13181 }
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013182 /* using MSI-X */
13183 dd->num_msix_entries = total;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013184 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13185 }
13186
13187 /* mask all interrupts */
13188 set_intr_state(dd, 0);
13189 /* clear all pending interrupts */
13190 clear_all_interrupts(dd);
13191
13192 /* reset general handler mask, chip MSI-X mappings */
13193 reset_interrupts(dd);
13194
13195 if (single_interrupt)
13196 ret = request_intx_irq(dd);
13197 else
13198 ret = request_msix_irqs(dd);
13199 if (ret)
13200 goto fail;
13201
13202 return 0;
13203
13204fail:
13205 clean_up_interrupts(dd);
13206 return ret;
13207}
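/*
 * Illustrative sketch, not part of the driver: the MSI-X vector budget
 * computed at the top of set_up_interrupts() above - one general "slow
 * path" vector, one per used SDMA engine, one per kernel receive
 * context, plus the vectors reserved for VNIC contexts.  The helper
 * name is made up.
 */
static inline u32 msix_budget_sketch(struct hfi1_devdata *dd)
{
	return 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
}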
13208
13209/*
13210 * Set up context values in dd. Sets:
13211 *
13212 * num_rcv_contexts - number of contexts being used
13213 * n_krcv_queues - number of kernel contexts
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013214 * first_dyn_alloc_ctxt - first dynamically allocated context
13215 * in array of contexts
Mike Marciniszyn77241052015-07-30 15:17:43 -040013216 * freectxts - number of free user contexts
13217 * num_send_contexts - number of PIO send contexts being used
13218 */
13219static int set_up_context_variables(struct hfi1_devdata *dd)
13220{
Harish Chegondi429b6a72016-08-31 07:24:40 -070013221 unsigned long num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013222 int total_contexts;
13223 int ret;
13224 unsigned ngroups;
Dean Luick8f000f72016-04-12 11:32:06 -070013225 int qos_rmt_count;
13226 int user_rmt_reduced;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013227
13228 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070013229 * Kernel receive contexts:
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013230 * - Context 0 - control context (VL15/multicast/error)
Dean Luick33a9eb52016-04-12 10:50:22 -070013231 * - Context 1 - first kernel context
13232 * - Context 2 - second kernel context
13233 * ...
Mike Marciniszyn77241052015-07-30 15:17:43 -040013234 */
13235 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013236 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070013237 * n_krcvqs is the sum of module parameter kernel receive
13238 * contexts, krcvqs[]. It does not include the control
13239 * context, so add that.
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013240 */
Dean Luick33a9eb52016-04-12 10:50:22 -070013241 num_kernel_contexts = n_krcvqs + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013242 else
Harish Chegondi8784ac02016-07-25 13:38:50 -070013243 num_kernel_contexts = DEFAULT_KRCVQS + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013244 /*
13245 * Every kernel receive context needs an ACK send context.
13246	 * One send context is allocated for each VL{0-7} and VL15.
13247 */
13248 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13249 dd_dev_err(dd,
Harish Chegondi429b6a72016-08-31 07:24:40 -070013250 "Reducing # kernel rcv contexts to: %d, from %lu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013251 (int)(dd->chip_send_contexts - num_vls - 1),
Harish Chegondi429b6a72016-08-31 07:24:40 -070013252 num_kernel_contexts);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013253 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13254 }
13255 /*
Jubin John0852d242016-04-12 11:30:08 -070013256 * User contexts:
13257 * - default to 1 user context per real (non-HT) CPU core if
13258 * num_user_contexts is negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040013259 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050013260 if (num_user_contexts < 0)
Jubin John0852d242016-04-12 11:30:08 -070013261 num_user_contexts =
Dennis Dalessandro41973442016-07-25 07:52:36 -070013262 cpumask_weight(&node_affinity.real_cpu_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013263
13264 total_contexts = num_kernel_contexts + num_user_contexts;
13265
13266 /*
13267 * Adjust the counts given a global max.
13268 */
13269 if (total_contexts > dd->chip_rcv_contexts) {
13270 dd_dev_err(dd,
13271 "Reducing # user receive contexts to: %d, from %d\n",
13272 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
13273 (int)num_user_contexts);
13274 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
13275 /* recalculate */
13276 total_contexts = num_kernel_contexts + num_user_contexts;
13277 }
13278
Dean Luick8f000f72016-04-12 11:32:06 -070013279 /* each user context requires an entry in the RMT */
13280 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13281 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13282 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13283 dd_dev_err(dd,
13284 "RMT size is reducing the number of user receive contexts from %d to %d\n",
13285 (int)num_user_contexts,
13286 user_rmt_reduced);
13287 /* recalculate */
13288 num_user_contexts = user_rmt_reduced;
13289 total_contexts = num_kernel_contexts + num_user_contexts;
13290 }
13291
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013292 /* Accommodate VNIC contexts */
13293 if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
13294 total_contexts += HFI1_NUM_VNIC_CTXT;
13295
13296 /* the first N are kernel contexts, the rest are user/vnic contexts */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013297 dd->num_rcv_contexts = total_contexts;
13298 dd->n_krcv_queues = num_kernel_contexts;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013299 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080013300 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013301 dd->freectxts = num_user_contexts;
13302 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013303 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
13304 (int)dd->chip_rcv_contexts,
13305 (int)dd->num_rcv_contexts,
13306 (int)dd->n_krcv_queues,
13307 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013308
13309 /*
13310 * Receive array allocation:
13311 * All RcvArray entries are divided into groups of 8. This
13312 * is required by the hardware and will speed up writes to
13313 * consecutive entries by using write-combining of the entire
13314 * cacheline.
13315 *
13316	 * The number of groups is evenly divided among all contexts;
13317	 * any left-over groups are given to the first N user
13318	 * contexts.
13319 */
13320 dd->rcv_entries.group_size = RCV_INCREMENT;
13321 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13322 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13323 dd->rcv_entries.nctxt_extra = ngroups -
13324 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13325 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13326 dd->rcv_entries.ngroups,
13327 dd->rcv_entries.nctxt_extra);
13328 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13329 MAX_EAGER_ENTRIES * 2) {
13330 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13331 dd->rcv_entries.group_size;
13332 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013333 "RcvArray group count too high, change to %u\n",
13334 dd->rcv_entries.ngroups);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013335 dd->rcv_entries.nctxt_extra = 0;
13336 }
13337 /*
13338 * PIO send contexts
13339 */
13340 ret = init_sc_pools_and_sizes(dd);
13341 if (ret >= 0) { /* success */
13342 dd->num_send_contexts = ret;
13343 dd_dev_info(
13344 dd,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013345 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013346 dd->chip_send_contexts,
13347 dd->num_send_contexts,
13348 dd->sc_sizes[SC_KERNEL].count,
13349 dd->sc_sizes[SC_ACK].count,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013350 dd->sc_sizes[SC_USER].count,
13351 dd->sc_sizes[SC_VL15].count);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013352 ret = 0; /* success */
13353 }
13354
13355 return ret;
13356}
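/*
 * Illustrative sketch, not part of the driver: the RcvArray group split
 * performed in set_up_context_variables() above.  Entries come in
 * groups of RCV_INCREMENT, the groups are divided evenly across all
 * receive contexts, and the remainder is later handed to the first few
 * user contexts.  The helper and parameter names are made up.
 */
static inline void rcv_group_split_sketch(u32 array_count, u32 group_size,
					  u32 nctxts, u32 *per_ctxt,
					  u32 *extra)
{
	u32 ngroups = array_count / group_size;

	*per_ctxt = ngroups / nctxts;			/* groups per context */
	*extra = ngroups - (nctxts * *per_ctxt);	/* left-over groups */
}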
13357
13358/*
13359 * Set the device/port partition key table. The MAD code
13360 * will ensure that, at least, the partial management
13361 * partition key is present in the table.
13362 */
13363static void set_partition_keys(struct hfi1_pportdata *ppd)
13364{
13365 struct hfi1_devdata *dd = ppd->dd;
13366 u64 reg = 0;
13367 int i;
13368
13369 dd_dev_info(dd, "Setting partition keys\n");
13370 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13371 reg |= (ppd->pkeys[i] &
13372 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13373 ((i % 4) *
13374 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13375 /* Each register holds 4 PKey values. */
13376 if ((i % 4) == 3) {
13377 write_csr(dd, RCV_PARTITION_KEY +
13378 ((i - 3) * 2), reg);
13379 reg = 0;
13380 }
13381 }
13382
13383 /* Always enable HW pkeys check when pkeys table is set */
13384 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13385}
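/*
 * Illustrative sketch, not part of the driver: the packing performed by
 * set_partition_keys() above, assuming the usual layout of four 16-bit
 * partition keys per 64-bit RCV_PARTITION_KEY register (i.e. a B shift
 * of 16).  The helper name is made up; 'pkeys' points at four
 * consecutive table entries.
 */
static inline u64 pack_four_pkeys_sketch(const u16 *pkeys)
{
	u64 reg = 0;
	int i;

	for (i = 0; i < 4; i++)
		reg |= (u64)pkeys[i] << (16 * i);	/* key i -> bits 16*i and up */
	return reg;
}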
13386
13387/*
13388 * These CSRs and memories are uninitialized on reset and must be
13389 * written before reading to set the ECC/parity bits.
13390 *
13391 * NOTE: All user context CSRs that are not mmaped write-only
13392 * (e.g. the TID flows) must be initialized even if the driver never
13393 * reads them.
13394 */
13395static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13396{
13397 int i, j;
13398
13399 /* CceIntMap */
13400 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013401 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013402
13403 /* SendCtxtCreditReturnAddr */
13404 for (i = 0; i < dd->chip_send_contexts; i++)
13405 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13406
13407 /* PIO Send buffers */
13408 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080013409 /*
13410 * These are not normally read, and (presently) have no method
13411 * to be read, so are not pre-initialized
13412 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013413
13414 /* RcvHdrAddr */
13415 /* RcvHdrTailAddr */
13416 /* RcvTidFlowTable */
13417 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13418 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13419 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13420 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080013421 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013422 }
13423
13424 /* RcvArray */
13425 for (i = 0; i < dd->chip_rcv_array_count; i++)
Jubin John8638b772016-02-14 20:19:24 -080013426 write_csr(dd, RCV_ARRAY + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013427 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013428
13429 /* RcvQPMapTable */
13430 for (i = 0; i < 32; i++)
13431 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13432}
13433
13434/*
13435 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13436 */
13437static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13438 u64 ctrl_bits)
13439{
13440 unsigned long timeout;
13441 u64 reg;
13442
13443 /* is the condition present? */
13444 reg = read_csr(dd, CCE_STATUS);
13445 if ((reg & status_bits) == 0)
13446 return;
13447
13448 /* clear the condition */
13449 write_csr(dd, CCE_CTRL, ctrl_bits);
13450
13451 /* wait for the condition to clear */
13452 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13453 while (1) {
13454 reg = read_csr(dd, CCE_STATUS);
13455 if ((reg & status_bits) == 0)
13456 return;
13457 if (time_after(jiffies, timeout)) {
13458 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013459 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13460 status_bits, reg & status_bits);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013461 return;
13462 }
13463 udelay(1);
13464 }
13465}
13466
13467/* set CCE CSRs to chip reset defaults */
13468static void reset_cce_csrs(struct hfi1_devdata *dd)
13469{
13470 int i;
13471
13472 /* CCE_REVISION read-only */
13473 /* CCE_REVISION2 read-only */
13474 /* CCE_CTRL - bits clear automatically */
13475 /* CCE_STATUS read-only, use CceCtrl to clear */
13476 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13477 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13478 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13479 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13480 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13481 /* CCE_ERR_STATUS read-only */
13482 write_csr(dd, CCE_ERR_MASK, 0);
13483 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13484 /* CCE_ERR_FORCE leave alone */
13485 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13486 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13487 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13488 /* CCE_PCIE_CTRL leave alone */
13489 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13490 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13491 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013492 CCE_MSIX_TABLE_UPPER_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013493 }
13494 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13495 /* CCE_MSIX_PBA read-only */
13496 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13497 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13498 }
13499 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13500 write_csr(dd, CCE_INT_MAP, 0);
13501 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13502 /* CCE_INT_STATUS read-only */
13503 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13504 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13505 /* CCE_INT_FORCE leave alone */
13506 /* CCE_INT_BLOCKED read-only */
13507 }
13508 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13509 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13510}
13511
Mike Marciniszyn77241052015-07-30 15:17:43 -040013512/* set MISC CSRs to chip reset defaults */
13513static void reset_misc_csrs(struct hfi1_devdata *dd)
13514{
13515 int i;
13516
13517 for (i = 0; i < 32; i++) {
13518 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13519 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13520 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13521 }
Jubin John4d114fd2016-02-14 20:21:43 -080013522 /*
13523 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13524	 * only be written in 128-byte chunks
13525 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013526 /* init RSA engine to clear lingering errors */
13527 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13528 write_csr(dd, MISC_CFG_RSA_MU, 0);
13529 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13530 /* MISC_STS_8051_DIGEST read-only */
13531 /* MISC_STS_SBM_DIGEST read-only */
13532 /* MISC_STS_PCIE_DIGEST read-only */
13533 /* MISC_STS_FAB_DIGEST read-only */
13534 /* MISC_ERR_STATUS read-only */
13535 write_csr(dd, MISC_ERR_MASK, 0);
13536 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13537 /* MISC_ERR_FORCE leave alone */
13538}
13539
13540/* set TXE CSRs to chip reset defaults */
13541static void reset_txe_csrs(struct hfi1_devdata *dd)
13542{
13543 int i;
13544
13545 /*
13546 * TXE Kernel CSRs
13547 */
13548 write_csr(dd, SEND_CTRL, 0);
13549 __cm_reset(dd, 0); /* reset CM internal state */
13550 /* SEND_CONTEXTS read-only */
13551 /* SEND_DMA_ENGINES read-only */
13552 /* SEND_PIO_MEM_SIZE read-only */
13553 /* SEND_DMA_MEM_SIZE read-only */
13554 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13555 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13556 /* SEND_PIO_ERR_STATUS read-only */
13557 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13558 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13559 /* SEND_PIO_ERR_FORCE leave alone */
13560 /* SEND_DMA_ERR_STATUS read-only */
13561 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13562 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13563 /* SEND_DMA_ERR_FORCE leave alone */
13564 /* SEND_EGRESS_ERR_STATUS read-only */
13565 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13566 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13567 /* SEND_EGRESS_ERR_FORCE leave alone */
13568 write_csr(dd, SEND_BTH_QP, 0);
13569 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13570 write_csr(dd, SEND_SC2VLT0, 0);
13571 write_csr(dd, SEND_SC2VLT1, 0);
13572 write_csr(dd, SEND_SC2VLT2, 0);
13573 write_csr(dd, SEND_SC2VLT3, 0);
13574 write_csr(dd, SEND_LEN_CHECK0, 0);
13575 write_csr(dd, SEND_LEN_CHECK1, 0);
13576 /* SEND_ERR_STATUS read-only */
13577 write_csr(dd, SEND_ERR_MASK, 0);
13578 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13579 /* SEND_ERR_FORCE read-only */
13580 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013581 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013582 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013583 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13584 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13585 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013586 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013587 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013588 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013589 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013590 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
Jubin John17fb4f22016-02-14 20:21:52 -080013591 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013592 /* SEND_CM_CREDIT_USED_STATUS read-only */
13593 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13594 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13595 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13596 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13597 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13598 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013599 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013600 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13601 /* SEND_CM_CREDIT_USED_VL read-only */
13602 /* SEND_CM_CREDIT_USED_VL15 read-only */
13603 /* SEND_EGRESS_CTXT_STATUS read-only */
13604 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13605 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13606 /* SEND_EGRESS_ERR_INFO read-only */
13607 /* SEND_EGRESS_ERR_SOURCE read-only */
13608
13609 /*
13610 * TXE Per-Context CSRs
13611 */
13612 for (i = 0; i < dd->chip_send_contexts; i++) {
13613 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13614 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13615 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13616 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13617 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13618 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13619 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13620 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13621 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13622 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13623 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13624 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13625 }
13626
13627 /*
13628 * TXE Per-SDMA CSRs
13629 */
13630 for (i = 0; i < dd->chip_sdma_engines; i++) {
13631 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13632 /* SEND_DMA_STATUS read-only */
13633 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13634 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13635 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13636 /* SEND_DMA_HEAD read-only */
13637 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13638 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13639 /* SEND_DMA_IDLE_CNT read-only */
13640 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13641 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13642 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13643 /* SEND_DMA_ENG_ERR_STATUS read-only */
13644 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13645 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13646 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13647 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13648 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13649 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13650 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13651 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13652 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13653 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13654 }
13655}
13656
13657/*
13658 * Expect on entry:
13659 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13660 */
13661static void init_rbufs(struct hfi1_devdata *dd)
13662{
13663 u64 reg;
13664 int count;
13665
13666 /*
13667 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13668 * clear.
13669 */
13670 count = 0;
13671 while (1) {
13672 reg = read_csr(dd, RCV_STATUS);
13673 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13674 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13675 break;
13676 /*
13677 * Give up after 1ms - maximum wait time.
13678 *
Harish Chegondie8a70af2016-09-25 07:42:01 -070013679 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
Mike Marciniszyn77241052015-07-30 15:17:43 -040013680 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
Harish Chegondie8a70af2016-09-25 07:42:01 -070013681 * 136 KB / (66% * 250MB/s) = 844us
Mike Marciniszyn77241052015-07-30 15:17:43 -040013682 */
13683 if (count++ > 500) {
13684 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013685 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13686 __func__, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013687 break;
13688 }
13689 udelay(2); /* do not busy-wait the CSR */
13690 }
13691
13692 /* start the init - expect RcvCtrl to be 0 */
13693 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13694
13695 /*
13696 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13697 * period after the write before RcvStatus.RxRbufInitDone is valid.
13698 * The delay in the first run through the loop below is sufficient and
13699 * required before the first read of RcvStatus.RxRbufInitDone.
13700 */
13701 read_csr(dd, RCV_CTRL);
13702
13703 /* wait for the init to finish */
13704 count = 0;
13705 while (1) {
13706 /* delay is required first time through - see above */
13707 udelay(2); /* do not busy-wait the CSR */
13708 reg = read_csr(dd, RCV_STATUS);
13709 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13710 break;
13711
13712 /* give up after 100us - slowest possible at 33MHz is 73us */
13713 if (count++ > 50) {
13714 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013715 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13716 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013717 break;
13718 }
13719 }
13720}
13721
13722/* set RXE CSRs to chip reset defaults */
13723static void reset_rxe_csrs(struct hfi1_devdata *dd)
13724{
13725 int i, j;
13726
13727 /*
13728 * RXE Kernel CSRs
13729 */
13730 write_csr(dd, RCV_CTRL, 0);
13731 init_rbufs(dd);
13732 /* RCV_STATUS read-only */
13733 /* RCV_CONTEXTS read-only */
13734 /* RCV_ARRAY_CNT read-only */
13735 /* RCV_BUF_SIZE read-only */
13736 write_csr(dd, RCV_BTH_QP, 0);
13737 write_csr(dd, RCV_MULTICAST, 0);
13738 write_csr(dd, RCV_BYPASS, 0);
13739 write_csr(dd, RCV_VL15, 0);
13740 /* this is a clear-down */
13741 write_csr(dd, RCV_ERR_INFO,
Jubin John17fb4f22016-02-14 20:21:52 -080013742 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013743 /* RCV_ERR_STATUS read-only */
13744 write_csr(dd, RCV_ERR_MASK, 0);
13745 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13746 /* RCV_ERR_FORCE leave alone */
13747 for (i = 0; i < 32; i++)
13748 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13749 for (i = 0; i < 4; i++)
13750 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13751 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13752 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13753 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13754 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013755 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13756 clear_rsm_rule(dd, i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013757 for (i = 0; i < 32; i++)
13758 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13759
13760 /*
13761 * RXE Kernel and User Per-Context CSRs
13762 */
13763 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13764 /* kernel */
13765 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13766 /* RCV_CTXT_STATUS read-only */
13767 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13768 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13769 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13770 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13771 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13772 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13773 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13774 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13775 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13776 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13777
13778 /* user */
13779 /* RCV_HDR_TAIL read-only */
13780 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13781 /* RCV_EGR_INDEX_TAIL read-only */
13782 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13783 /* RCV_EGR_OFFSET_TAIL read-only */
13784 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
Jubin John17fb4f22016-02-14 20:21:52 -080013785 write_uctxt_csr(dd, i,
13786 RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013787 }
13788 }
13789}
13790
13791/*
13792 * Set sc2vl tables.
13793 *
13794 * They power on to zeros, so to avoid send context errors
13795 * they need to be set:
13796 *
13797 * SC 0-7 -> VL 0-7 (respectively)
13798 * SC 15 -> VL 15
13799 * otherwise
13800 * -> VL 0
13801 */
13802static void init_sc2vl_tables(struct hfi1_devdata *dd)
13803{
13804 int i;
13805 /* init per architecture spec, constrained by hardware capability */
13806
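	/*
	 * Each SEND_SC2VLT CSR below carries the VL assignment for eight
	 * SCs and each DCC_CFG_SC_VL_TABLE CSR carries sixteen, so four
	 * send-side writes plus two DC writes cover all 32 SCs.
	 */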
13807 /* HFI maps sent packets */
13808 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13809 0,
13810 0, 0, 1, 1,
13811 2, 2, 3, 3,
13812 4, 4, 5, 5,
13813 6, 6, 7, 7));
13814 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13815 1,
13816 8, 0, 9, 0,
13817 10, 0, 11, 0,
13818 12, 0, 13, 0,
13819 14, 0, 15, 15));
13820 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13821 2,
13822 16, 0, 17, 0,
13823 18, 0, 19, 0,
13824 20, 0, 21, 0,
13825 22, 0, 23, 0));
13826 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13827 3,
13828 24, 0, 25, 0,
13829 26, 0, 27, 0,
13830 28, 0, 29, 0,
13831 30, 0, 31, 0));
13832
13833 /* DC maps received packets */
13834 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13835 15_0,
13836 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13837 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13838 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13839 31_16,
13840 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13841 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13842
13843 /* initialize the cached sc2vl values consistently with h/w */
13844 for (i = 0; i < 32; i++) {
13845 if (i < 8 || i == 15)
13846 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13847 else
13848 *((u8 *)(dd->sc2vl) + i) = 0;
13849 }
13850}
13851
13852/*
13853 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13854 * depend on the chip going through a power-on reset - a driver may be loaded
13855 * and unloaded many times.
13856 *
13857 * Do not write any CSR values to the chip in this routine - there may be
13858 * a reset following the (possible) FLR in this routine.
13859 *
13860 */
13861static void init_chip(struct hfi1_devdata *dd)
13862{
13863 int i;
13864
13865 /*
13866 * Put the HFI CSRs in a known state.
13867 * Combine this with a DC reset.
13868 *
13869 * Stop the device from doing anything while we do a
13870 * reset. We know there are no other active users of
13871 * the device since we are now in charge. Turn off
13872 * all outbound and inbound traffic and make sure
13873 * the device does not generate any interrupts.
13874 */
13875
13876 /* disable send contexts and SDMA engines */
13877 write_csr(dd, SEND_CTRL, 0);
13878 for (i = 0; i < dd->chip_send_contexts; i++)
13879 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13880 for (i = 0; i < dd->chip_sdma_engines; i++)
13881 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13882 /* disable port (turn off RXE inbound traffic) and contexts */
13883 write_csr(dd, RCV_CTRL, 0);
13884 for (i = 0; i < dd->chip_rcv_contexts; i++)
13885 write_csr(dd, RCV_CTXT_CTRL, 0);
13886 /* mask all interrupt sources */
13887 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013888 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013889
13890 /*
13891 * DC Reset: do a full DC reset before the register clear.
13892 * A recommended length of time to hold is one CSR read,
13893 * so reread the CceDcCtrl. Then, hold the DC in reset
13894 * across the clear.
13895 */
13896 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013897 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013898
13899 if (use_flr) {
13900 /*
13901 * A FLR will reset the SPC core and part of the PCIe.
13902 * The parts that need to be restored have already been
13903 * saved.
13904 */
13905 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13906
13907 /* do the FLR, the DC reset will remain */
Christoph Hellwig21c433a2017-04-25 14:36:19 -050013908 pcie_flr(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013909
13910 /* restore command and BARs */
13911 restore_pci_variables(dd);
13912
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013913 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013914 dd_dev_info(dd, "Resetting CSRs with FLR\n");
Christoph Hellwig21c433a2017-04-25 14:36:19 -050013915 pcie_flr(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013916 restore_pci_variables(dd);
13917 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013918 } else {
13919 dd_dev_info(dd, "Resetting CSRs with writes\n");
13920 reset_cce_csrs(dd);
13921 reset_txe_csrs(dd);
13922 reset_rxe_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013923 reset_misc_csrs(dd);
13924 }
13925 /* clear the DC reset */
13926 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013927
Mike Marciniszyn77241052015-07-30 15:17:43 -040013928 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013929 setextled(dd, 0);
13930
Mike Marciniszyn77241052015-07-30 15:17:43 -040013931 /*
13932 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013933 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013934 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013935 * anything plugged constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013936 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013937 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013938 * I2CCLK and I2CDAT will change per direction, and INT_N and
13939 * MODPRS_N are input only and their value is ignored.
13940 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013941 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13942 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Dean Luicka2ee27a2016-03-05 08:49:50 -080013943 init_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013944}
13945
13946static void init_early_variables(struct hfi1_devdata *dd)
13947{
13948 int i;
13949
13950 /* assign link credit variables */
13951 dd->vau = CM_VAU;
13952 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013953 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013954 dd->link_credits--;
13955 dd->vcu = cu_to_vcu(hfi1_cu);
13956 /* enough room for 8 MAD packets plus header - 17K */
13957 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
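	/*
	 * Illustrative arithmetic (assuming vau_to_au() yields a 64-byte
	 * allocation unit for the CM_VAU value used here):
	 * 8 * (2048 + 128) = 17408 bytes -> 17408 / 64 = 272 AUs.
	 */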
13958 if (dd->vl15_init > dd->link_credits)
13959 dd->vl15_init = dd->link_credits;
13960
13961 write_uninitialized_csrs_and_memories(dd);
13962
13963 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13964 for (i = 0; i < dd->num_pports; i++) {
13965 struct hfi1_pportdata *ppd = &dd->pport[i];
13966
13967 set_partition_keys(ppd);
13968 }
13969 init_sc2vl_tables(dd);
13970}
13971
13972static void init_kdeth_qp(struct hfi1_devdata *dd)
13973{
13974 /* user changed the KDETH_QP */
13975 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13976 /* out of range or illegal value */
13977 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13978 kdeth_qp = 0;
13979 }
13980 if (kdeth_qp == 0) /* not set, or failed range check */
13981 kdeth_qp = DEFAULT_KDETH_QP;
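	/*
	 * The same prefix is programmed on both the send and receive sides
	 * below so the hardware can distinguish KDETH packets from verbs
	 * packets by the upper bits of the BTH destination QP.
	 */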
13982
13983 write_csr(dd, SEND_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013984 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13985 SEND_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013986
13987 write_csr(dd, RCV_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013988 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13989 RCV_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013990}
13991
13992/**
13993 * init_qpmap_table
13994 * @dd - device data
13995 * @first_ctxt - first context
13996 * @last_ctxt - last context
13997 *
13998 * This routine sets the qpn mapping table that
13999 * is indexed by qpn[8:1].
14000 *
14001 * The routine will round robin the 256 settings
14002 * from first_ctxt to last_ctxt.
14003 *
14004 * The first/last looks ahead to having specialized
14005 * receive contexts for mgmt and bypass. Normal
14006 * verbs traffic is assumed to be on a range
14007 * of receive contexts.
14008 */
14009static void init_qpmap_table(struct hfi1_devdata *dd,
14010 u32 first_ctxt,
14011 u32 last_ctxt)
14012{
14013 u64 reg = 0;
14014 u64 regno = RCV_QP_MAP_TABLE;
14015 int i;
14016 u64 ctxt = first_ctxt;
14017
Dean Luick60d585ad2016-04-12 10:50:35 -070014018 for (i = 0; i < 256; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014019 reg |= ctxt << (8 * (i % 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014020 ctxt++;
14021 if (ctxt > last_ctxt)
14022 ctxt = first_ctxt;
Dean Luick60d585ad2016-04-12 10:50:35 -070014023 if (i % 8 == 7) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014024 write_csr(dd, regno, reg);
14025 reg = 0;
14026 regno += 8;
14027 }
14028 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040014029
14030 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14031 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14032}
14033
Dean Luick372cc85a2016-04-12 11:30:51 -070014034struct rsm_map_table {
14035 u64 map[NUM_MAP_REGS];
14036 unsigned int used;
14037};
14038
Dean Luickb12349a2016-04-12 11:31:33 -070014039struct rsm_rule_data {
14040 u8 offset;
14041 u8 pkt_type;
14042 u32 field1_off;
14043 u32 field2_off;
14044 u32 index1_off;
14045 u32 index1_width;
14046 u32 index2_off;
14047 u32 index2_width;
14048 u32 mask1;
14049 u32 value1;
14050 u32 mask2;
14051 u32 value2;
14052};
14053
Dean Luick372cc85a2016-04-12 11:30:51 -070014054/*
14055 * Return an initialized RMT map table for users to fill in. OK if it
14056 * returns NULL, indicating no table.
14057 */
14058static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14059{
14060 struct rsm_map_table *rmt;
14061 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
14062
14063 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14064 if (rmt) {
14065 memset(rmt->map, rxcontext, sizeof(rmt->map));
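		/*
		 * Every entry starts at the hardware default context;
		 * callers overwrite only the entries they actually use.
		 */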
14066 rmt->used = 0;
14067 }
14068
14069 return rmt;
14070}
14071
14072/*
14073 * Write the final RMT map table to the chip and free the table. OK if
14074 * table is NULL.
14075 */
14076static void complete_rsm_map_table(struct hfi1_devdata *dd,
14077 struct rsm_map_table *rmt)
14078{
14079 int i;
14080
14081 if (rmt) {
14082 /* write table to chip */
14083 for (i = 0; i < NUM_MAP_REGS; i++)
14084 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14085
14086 /* enable RSM */
14087 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14088 }
14089}
14090
Dean Luickb12349a2016-04-12 11:31:33 -070014091/*
14092 * Add a receive side mapping rule.
14093 */
14094static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14095 struct rsm_rule_data *rrd)
14096{
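	/*
	 * Three CSRs describe a rule: RCV_RSM_CFG enables it and gives the
	 * map table offset and packet type, RCV_RSM_SELECT picks the header
	 * fields and index extraction, and RCV_RSM_MATCH holds the
	 * mask/value filters.
	 */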
14097 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14098 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14099 1ull << rule_index | /* enable bit */
14100 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14101 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14102 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14103 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14104 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14105 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14106 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14107 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14108 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14109 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14110 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14111 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14112 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14113}
14114
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014115/*
14116 * Clear a receive side mapping rule.
14117 */
14118static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14119{
14120 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14121 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14122 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14123}
14124
Dean Luick4a818be2016-04-12 11:31:11 -070014125/* return the number of RSM map table entries that will be used for QOS */
14126static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14127 unsigned int *np)
14128{
14129 int i;
14130 unsigned int m, n;
14131 u8 max_by_vl = 0;
14132
14133 /* is QOS active at all? */
14134 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14135 num_vls == 1 ||
14136 krcvqsset <= 1)
14137 goto no_qos;
14138
14139 /* determine bits for qpn */
14140 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14141 if (krcvqs[i] > max_by_vl)
14142 max_by_vl = krcvqs[i];
14143 if (max_by_vl > 32)
14144 goto no_qos;
14145 m = ilog2(__roundup_pow_of_two(max_by_vl));
14146
14147 /* determine bits for vl */
14148 n = ilog2(__roundup_pow_of_two(num_vls));
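	/*
	 * Example with made-up values: krcvqs = {4, 4} and num_vls = 2
	 * give max_by_vl = 4, so m = 2 and n = 1, and the QOS rule would
	 * consume 1 << (2 + 1) = 8 RSM map table entries.
	 */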
14149
14150 /* reject if too much is used */
14151 if ((m + n) > 7)
14152 goto no_qos;
14153
14154 if (mp)
14155 *mp = m;
14156 if (np)
14157 *np = n;
14158
14159 return 1 << (m + n);
14160
14161no_qos:
14162 if (mp)
14163 *mp = 0;
14164 if (np)
14165 *np = 0;
14166 return 0;
14167}
14168
Mike Marciniszyn77241052015-07-30 15:17:43 -040014169/**
14170 * init_qos - init RX qos
14171 * @dd - device data
Dean Luick372cc85a2016-04-12 11:30:51 -070014172 * @rmt - RSM map table
Mike Marciniszyn77241052015-07-30 15:17:43 -040014173 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014174 * This routine initializes Rule 0 and the RSM map table to implement
14175 * quality of service (qos).
Mike Marciniszyn77241052015-07-30 15:17:43 -040014176 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014177 * If all of the limit tests succeed, qos is applied based on the array
14178 * interpretation of krcvqs where entry 0 is VL0.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014179 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014180 * The number of vl bits (n) and the number of qpn bits (m) are computed to
14181 * feed both the RSM map table and the single rule.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014182 */
Dean Luick372cc85a2016-04-12 11:30:51 -070014183static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014184{
Dean Luickb12349a2016-04-12 11:31:33 -070014185 struct rsm_rule_data rrd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014186 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
Dean Luick372cc85a2016-04-12 11:30:51 -070014187 unsigned int rmt_entries;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014188 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014189
Dean Luick4a818be2016-04-12 11:31:11 -070014190 if (!rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014191 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014192 rmt_entries = qos_rmt_entries(dd, &m, &n);
14193 if (rmt_entries == 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014194 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014195 qpns_per_vl = 1 << m;
14196
Dean Luick372cc85a2016-04-12 11:30:51 -070014197 /* enough room in the map table? */
14198 rmt_entries = 1 << (m + n);
14199 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
Easwar Hariharan859bcad2015-12-10 11:13:38 -050014200 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014201
Dean Luick372cc85a2016-04-12 11:30:51 -070014202	/* add qos entries to the RSM map table */
Dean Luick33a9eb52016-04-12 10:50:22 -070014203 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014204 unsigned tctxt;
14205
14206 for (qpn = 0, tctxt = ctxt;
14207 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14208 unsigned idx, regoff, regidx;
14209
Dean Luick372cc85a2016-04-12 11:30:51 -070014210 /* generate the index the hardware will produce */
14211 idx = rmt->used + ((qpn << n) ^ i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014212 regoff = (idx % 8) * 8;
14213 regidx = idx / 8;
Dean Luick372cc85a2016-04-12 11:30:51 -070014214 /* replace default with context number */
14215 reg = rmt->map[regidx];
Mike Marciniszyn77241052015-07-30 15:17:43 -040014216 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14217 << regoff);
14218 reg |= (u64)(tctxt++) << regoff;
Dean Luick372cc85a2016-04-12 11:30:51 -070014219 rmt->map[regidx] = reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014220 if (tctxt == ctxt + krcvqs[i])
14221 tctxt = ctxt;
14222 }
14223 ctxt += krcvqs[i];
14224 }
Dean Luickb12349a2016-04-12 11:31:33 -070014225
14226 rrd.offset = rmt->used;
14227 rrd.pkt_type = 2;
14228 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14229 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14230 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14231 rrd.index1_width = n;
14232 rrd.index2_off = QPN_SELECT_OFFSET;
14233 rrd.index2_width = m + n;
14234 rrd.mask1 = LRH_BTH_MASK;
14235 rrd.value1 = LRH_BTH_VALUE;
14236 rrd.mask2 = LRH_SC_MASK;
14237 rrd.value2 = LRH_SC_VALUE;
14238
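	/*
	 * Rule 0 intercepts 9B verbs packets and spreads them across the
	 * map table block filled in above, using SC bits and QPN bits to
	 * form the per-packet index.
	 */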
14239 /* add rule 0 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014240 add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
Dean Luickb12349a2016-04-12 11:31:33 -070014241
Dean Luick372cc85a2016-04-12 11:30:51 -070014242 /* mark RSM map entries as used */
14243 rmt->used += rmt_entries;
Dean Luick33a9eb52016-04-12 10:50:22 -070014244 /* map everything else to the mcast/err/vl15 context */
14245 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014246 dd->qos_shift = n + 1;
14247 return;
14248bail:
14249 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050014250 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014251}
14252
Dean Luick8f000f72016-04-12 11:32:06 -070014253static void init_user_fecn_handling(struct hfi1_devdata *dd,
14254 struct rsm_map_table *rmt)
14255{
14256 struct rsm_rule_data rrd;
14257 u64 reg;
14258 int i, idx, regoff, regidx;
14259 u8 offset;
14260
14261 /* there needs to be enough room in the map table */
14262 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14263 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14264 return;
14265 }
14266
14267 /*
14268 * RSM will extract the destination context as an index into the
14269 * map table. The destination contexts are a sequential block
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014270 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
Dean Luick8f000f72016-04-12 11:32:06 -070014271 * Map entries are accessed as offset + extracted value. Adjust
14272 * the added offset so this sequence can be placed anywhere in
14273 * the table - as long as the entries themselves do not wrap.
14274 * There are only enough bits in offset for the table size, so
14275 * start with that to allow for a "negative" offset.
14276 */
14277 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014278 (int)dd->first_dyn_alloc_ctxt);
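	/*
	 * Worked example with made-up numbers: if the map table holds 256
	 * entries, rmt->used is 32 and first_dyn_alloc_ctxt is 8, then
	 * offset = (u8)(256 + 32 - 8) = 24 and context 8 lands on map
	 * entry 8 + 24 = 32, the first unused entry.
	 */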
Dean Luick8f000f72016-04-12 11:32:06 -070014279
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014280 for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
Dean Luick8f000f72016-04-12 11:32:06 -070014281 i < dd->num_rcv_contexts; i++, idx++) {
14282 /* replace with identity mapping */
14283 regoff = (idx % 8) * 8;
14284 regidx = idx / 8;
14285 reg = rmt->map[regidx];
14286 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14287 reg |= (u64)i << regoff;
14288 rmt->map[regidx] = reg;
14289 }
14290
14291 /*
14292 * For RSM intercept of Expected FECN packets:
14293 * o packet type 0 - expected
14294 * o match on F (bit 95), using select/match 1, and
14295 * o match on SH (bit 133), using select/match 2.
14296 *
14297 * Use index 1 to extract the 8-bit receive context from DestQP
14298 * (start at bit 64). Use that as the RSM map table index.
14299 */
14300 rrd.offset = offset;
14301 rrd.pkt_type = 0;
14302 rrd.field1_off = 95;
14303 rrd.field2_off = 133;
14304 rrd.index1_off = 64;
14305 rrd.index1_width = 8;
14306 rrd.index2_off = 0;
14307 rrd.index2_width = 0;
14308 rrd.mask1 = 1;
14309 rrd.value1 = 1;
14310 rrd.mask2 = 1;
14311 rrd.value2 = 1;
14312
14313 /* add rule 1 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014314 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
Dean Luick8f000f72016-04-12 11:32:06 -070014315
14316 rmt->used += dd->num_user_contexts;
14317}
14318
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014319/* Initialize RSM for VNIC */
14320void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14321{
14322 u8 i, j;
14323 u8 ctx_id = 0;
14324 u64 reg;
14325 u32 regoff;
14326 struct rsm_rule_data rrd;
14327
14328 if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14329 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14330 dd->vnic.rmt_start);
14331 return;
14332 }
14333
14334 dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14335 dd->vnic.rmt_start,
14336 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14337
14338 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14339 regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14340 reg = read_csr(dd, regoff);
14341 for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14342 /* Update map register with vnic context */
14343 j = (dd->vnic.rmt_start + i) % 8;
14344 reg &= ~(0xffllu << (j * 8));
14345 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14346 /* Wrap up vnic ctx index */
14347 ctx_id %= dd->vnic.num_ctxt;
14348 /* Write back map register */
14349 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14350 dev_dbg(&(dd)->pcidev->dev,
14351 "Vnic rsm map reg[%d] =0x%llx\n",
14352 regoff - RCV_RSM_MAP_TABLE, reg);
14353
14354 write_csr(dd, regoff, reg);
14355 regoff += 8;
14356 if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14357 reg = read_csr(dd, regoff);
14358 }
14359 }
14360
14361 /* Add rule for vnic */
14362 rrd.offset = dd->vnic.rmt_start;
14363 rrd.pkt_type = 4;
14364 /* Match 16B packets */
14365 rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14366 rrd.mask1 = L2_TYPE_MASK;
14367 rrd.value1 = L2_16B_VALUE;
14368 /* Match ETH L4 packets */
14369 rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14370 rrd.mask2 = L4_16B_TYPE_MASK;
14371 rrd.value2 = L4_16B_ETH_VALUE;
14372 /* Calc context from veswid and entropy */
14373 rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14374 rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14375 rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14376 rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14377 add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14378
14379 /* Enable RSM if not already enabled */
14380 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14381}
14382
14383void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14384{
14385 clear_rsm_rule(dd, RSM_INS_VNIC);
14386
14387 /* Disable RSM if used only by vnic */
14388 if (dd->vnic.rmt_start == 0)
14389 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14390}
14391
Mike Marciniszyn77241052015-07-30 15:17:43 -040014392static void init_rxe(struct hfi1_devdata *dd)
14393{
Dean Luick372cc85a2016-04-12 11:30:51 -070014394 struct rsm_map_table *rmt;
14395
Mike Marciniszyn77241052015-07-30 15:17:43 -040014396 /* enable all receive errors */
14397 write_csr(dd, RCV_ERR_MASK, ~0ull);
Dean Luick372cc85a2016-04-12 11:30:51 -070014398
14399 rmt = alloc_rsm_map_table(dd);
14400 /* set up QOS, including the QPN map table */
14401 init_qos(dd, rmt);
Dean Luick8f000f72016-04-12 11:32:06 -070014402 init_user_fecn_handling(dd, rmt);
Dean Luick372cc85a2016-04-12 11:30:51 -070014403 complete_rsm_map_table(dd, rmt);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014404 /* record number of used rsm map entries for vnic */
14405 dd->vnic.rmt_start = rmt->used;
Dean Luick372cc85a2016-04-12 11:30:51 -070014406 kfree(rmt);
14407
Mike Marciniszyn77241052015-07-30 15:17:43 -040014408 /*
14409 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14410 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14411 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14412 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14413 * Max_PayLoad_Size set to its minimum of 128.
14414 *
14415 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14416 * (64 bytes). Max_Payload_Size is possibly modified upward in
14417 * tune_pcie_caps() which is called after this routine.
14418 */
14419}
14420
14421static void init_other(struct hfi1_devdata *dd)
14422{
14423 /* enable all CCE errors */
14424 write_csr(dd, CCE_ERR_MASK, ~0ull);
14425 /* enable *some* Misc errors */
14426 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14427 /* enable all DC errors, except LCB */
14428 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14429 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14430}
14431
14432/*
14433 * Fill out the given AU table using the given CU. A CU is defined in terms
14434 * of AUs. The table is an encoding: given the index, how many AUs does that
14435 * represent?
14436 *
14437 * NOTE: Assumes that the register layout is the same for the
14438 * local and remote tables.
14439 */
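/*
 * For example, with cu == 1 the table entries 0..7 encode 0, 1, 2, 4, 8,
 * 16, 32 and 64 AUs respectively; larger CU values scale entries 2..7.
 */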
14440static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14441 u32 csr0to3, u32 csr4to7)
14442{
14443 write_csr(dd, csr0to3,
Jubin John17fb4f22016-02-14 20:21:52 -080014444 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14445 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14446 2ull * cu <<
14447 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14448 4ull * cu <<
14449 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014450 write_csr(dd, csr4to7,
Jubin John17fb4f22016-02-14 20:21:52 -080014451 8ull * cu <<
14452 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14453 16ull * cu <<
14454 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14455 32ull * cu <<
14456 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14457 64ull * cu <<
14458 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014459}
14460
14461static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14462{
14463 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014464 SEND_CM_LOCAL_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014465}
14466
14467void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14468{
14469 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014470 SEND_CM_REMOTE_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014471}
14472
14473static void init_txe(struct hfi1_devdata *dd)
14474{
14475 int i;
14476
14477 /* enable all PIO, SDMA, general, and Egress errors */
14478 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14479 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14480 write_csr(dd, SEND_ERR_MASK, ~0ull);
14481 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14482
14483 /* enable all per-context and per-SDMA engine errors */
14484 for (i = 0; i < dd->chip_send_contexts; i++)
14485 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14486 for (i = 0; i < dd->chip_sdma_engines; i++)
14487 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14488
14489 /* set the local CU to AU mapping */
14490 assign_local_cm_au_table(dd, dd->vcu);
14491
14492 /*
14493 * Set reasonable default for Credit Return Timer
14494 * Don't set on Simulator - causes it to choke.
14495 */
14496 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14497 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14498}
14499
14500int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
14501{
14502 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14503 unsigned sctxt;
14504 int ret = 0;
14505 u64 reg;
14506
14507 if (!rcd || !rcd->sc) {
14508 ret = -EINVAL;
14509 goto done;
14510 }
14511 sctxt = rcd->sc->hw_context;
14512 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14513 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14514 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14515 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14516 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14517 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14518 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14519 /*
14520 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040014521 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014522 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014523 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14524 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14525 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14526 }
14527
14528 /* Enable J_KEY check on receive context. */
14529 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14530 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14531 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14532 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
14533done:
14534 return ret;
14535}
14536
14537int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14538{
14539 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14540 unsigned sctxt;
14541 int ret = 0;
14542 u64 reg;
14543
14544 if (!rcd || !rcd->sc) {
14545 ret = -EINVAL;
14546 goto done;
14547 }
14548 sctxt = rcd->sc->hw_context;
14549 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14550 /*
14551 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14552 * This check would not have been enabled for A0 h/w, see
14553 * set_ctxt_jkey().
14554 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014555 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014556 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14557 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14558 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14559 }
14560 /* Turn off the J_KEY on the receive side */
14561 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14562done:
14563 return ret;
14564}
14565
14566int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14567{
14568 struct hfi1_ctxtdata *rcd;
14569 unsigned sctxt;
14570 int ret = 0;
14571 u64 reg;
14572
Jubin Johne4909742016-02-14 20:22:00 -080014573 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014574 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014575 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014576 ret = -EINVAL;
14577 goto done;
14578 }
14579 if (!rcd || !rcd->sc) {
14580 ret = -EINVAL;
14581 goto done;
14582 }
14583 sctxt = rcd->sc->hw_context;
14584 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14585 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14586 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14587 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14588 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Sebastian Sancheze38d1e42016-04-12 11:22:21 -070014589 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014590 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14591done:
14592 return ret;
14593}
14594
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014595int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014596{
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014597 u8 hw_ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014598 u64 reg;
14599
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014600 if (!ctxt || !ctxt->sc)
14601 return -EINVAL;
14602
14603 if (ctxt->ctxt >= dd->num_rcv_contexts)
14604 return -EINVAL;
14605
14606 hw_ctxt = ctxt->sc->hw_context;
14607 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014608 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014609 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14610 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14611
14612 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014613}
14614
14615/*
14616 * Start doing the clean up of the chip. Our clean up happens in multiple
14617 * stages and this is just the first.
14618 */
14619void hfi1_start_cleanup(struct hfi1_devdata *dd)
14620{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080014621 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014622 free_cntrs(dd);
14623 free_rcverr(dd);
14624 clean_up_interrupts(dd);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014625 finish_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014626}
14627
14628#define HFI_BASE_GUID(dev) \
14629 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14630
14631/*
Dean Luick78eb1292016-03-05 08:49:45 -080014632 * Information can be shared between the two HFIs on the same ASIC
14633 * in the same OS. This function finds the peer device and sets
14634 * up a shared structure.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014635 */
Dean Luick78eb1292016-03-05 08:49:45 -080014636static int init_asic_data(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014637{
14638 unsigned long flags;
14639 struct hfi1_devdata *tmp, *peer = NULL;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014640 struct hfi1_asic_data *asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014641 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014642
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014643 /* pre-allocate the asic structure in case we are the first device */
14644 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14645 if (!asic_data)
14646 return -ENOMEM;
14647
Mike Marciniszyn77241052015-07-30 15:17:43 -040014648 spin_lock_irqsave(&hfi1_devs_lock, flags);
14649 /* Find our peer device */
14650 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14651 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14652 dd->unit != tmp->unit) {
14653 peer = tmp;
14654 break;
14655 }
14656 }
14657
Dean Luick78eb1292016-03-05 08:49:45 -080014658 if (peer) {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014659 /* use already allocated structure */
Dean Luick78eb1292016-03-05 08:49:45 -080014660 dd->asic_data = peer->asic_data;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014661 kfree(asic_data);
Dean Luick78eb1292016-03-05 08:49:45 -080014662 } else {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014663 dd->asic_data = asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014664 mutex_init(&dd->asic_data->asic_resource_mutex);
14665 }
14666 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014667 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
Dean Luickdba715f2016-07-06 17:28:52 -040014668
14669 /* first one through - set up i2c devices */
14670 if (!peer)
14671 ret = set_up_i2c(dd, dd->asic_data);
14672
Dean Luick78eb1292016-03-05 08:49:45 -080014673 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014674}
14675
Dean Luick5d9157a2015-11-16 21:59:34 -050014676/*
14677 * Set dd->boardname. Use a generic name if a name is not returned from
14678 * EFI variable space.
14679 *
14680 * Return 0 on success, -ENOMEM if space could not be allocated.
14681 */
14682static int obtain_boardname(struct hfi1_devdata *dd)
14683{
14684 /* generic board description */
14685 const char generic[] =
14686 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14687 unsigned long size;
14688 int ret;
14689
14690 ret = read_hfi1_efi_var(dd, "description", &size,
14691 (void **)&dd->boardname);
14692 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014693 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014694 /* use generic description */
14695 dd->boardname = kstrdup(generic, GFP_KERNEL);
14696 if (!dd->boardname)
14697 return -ENOMEM;
14698 }
14699 return 0;
14700}
14701
Kaike Wan24487dd2016-02-26 13:33:23 -080014702/*
14703 * Check the interrupt registers to make sure that they are mapped correctly.
14704 * It is intended to help user identify any mismapping by VMM when the driver
14705 * is running in a VM. This function should only be called before interrupt
14706 * is set up properly.
14707 *
14708 * Return 0 on success, -EINVAL on failure.
14709 */
14710static int check_int_registers(struct hfi1_devdata *dd)
14711{
14712 u64 reg;
14713 u64 all_bits = ~(u64)0;
14714 u64 mask;
14715
14716 /* Clear CceIntMask[0] to avoid raising any interrupts */
14717 mask = read_csr(dd, CCE_INT_MASK);
14718 write_csr(dd, CCE_INT_MASK, 0ull);
14719 reg = read_csr(dd, CCE_INT_MASK);
14720 if (reg)
14721 goto err_exit;
14722
14723 /* Clear all interrupt status bits */
14724 write_csr(dd, CCE_INT_CLEAR, all_bits);
14725 reg = read_csr(dd, CCE_INT_STATUS);
14726 if (reg)
14727 goto err_exit;
14728
14729 /* Set all interrupt status bits */
14730 write_csr(dd, CCE_INT_FORCE, all_bits);
14731 reg = read_csr(dd, CCE_INT_STATUS);
14732 if (reg != all_bits)
14733 goto err_exit;
14734
14735 /* Restore the interrupt mask */
14736 write_csr(dd, CCE_INT_CLEAR, all_bits);
14737 write_csr(dd, CCE_INT_MASK, mask);
14738
14739 return 0;
14740err_exit:
14741 write_csr(dd, CCE_INT_MASK, mask);
14742 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14743 return -EINVAL;
14744}
14745
Mike Marciniszyn77241052015-07-30 15:17:43 -040014746/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014747 * Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014748 * @dev: the pci_dev for hfi1_ib device
14749 * @ent: pci_device_id struct for this dev
14750 *
14751 * Also allocates, initializes, and returns the devdata struct for this
14752 * device instance
14753 *
14754 * This is global, and is called directly at init to set up the
14755 * chip-specific function pointers for later use.
14756 */
14757struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14758 const struct pci_device_id *ent)
14759{
14760 struct hfi1_devdata *dd;
14761 struct hfi1_pportdata *ppd;
14762 u64 reg;
14763 int i, ret;
14764 static const char * const inames[] = { /* implementation names */
14765 "RTL silicon",
14766 "RTL VCS simulation",
14767 "RTL FPGA emulation",
14768 "Functional simulator"
14769 };
Kaike Wan24487dd2016-02-26 13:33:23 -080014770 struct pci_dev *parent = pdev->bus->self;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014771
Jubin John17fb4f22016-02-14 20:21:52 -080014772 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14773 sizeof(struct hfi1_pportdata));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014774 if (IS_ERR(dd))
14775 goto bail;
14776 ppd = dd->pport;
14777 for (i = 0; i < dd->num_pports; i++, ppd++) {
14778 int vl;
14779 /* init common fields */
14780 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14781 /* DC supports 4 link widths */
14782 ppd->link_width_supported =
14783 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14784 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14785 ppd->link_width_downgrade_supported =
14786 ppd->link_width_supported;
14787 /* start out enabling only 4X */
14788 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14789 ppd->link_width_downgrade_enabled =
14790 ppd->link_width_downgrade_supported;
14791 /* link width active is 0 when link is down */
14792 /* link width downgrade active is 0 when link is down */
14793
Jubin Johnd0d236e2016-02-14 20:20:15 -080014794 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14795 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014796 hfi1_early_err(&pdev->dev,
14797 "Invalid num_vls %u, using %u VLs\n",
14798 num_vls, HFI1_MAX_VLS_SUPPORTED);
14799 num_vls = HFI1_MAX_VLS_SUPPORTED;
14800 }
14801 ppd->vls_supported = num_vls;
14802 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014803 ppd->actual_vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014804 /* Set the default MTU. */
14805 for (vl = 0; vl < num_vls; vl++)
14806 dd->vld[vl].mtu = hfi1_max_mtu;
14807 dd->vld[15].mtu = MAX_MAD_PACKET;
14808 /*
14809 * Set the initial values to reasonable default, will be set
14810 * for real when link is up.
14811 */
14812 ppd->lstate = IB_PORT_DOWN;
14813 ppd->overrun_threshold = 0x4;
14814 ppd->phy_error_threshold = 0xf;
14815 ppd->port_crc_mode_enabled = link_crc_mask;
14816 /* initialize supported LTP CRC mode */
14817 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14818 /* initialize enabled LTP CRC mode */
14819 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14820 /* start in offline */
14821 ppd->host_link_state = HLS_DN_OFFLINE;
14822 init_vl_arb_caches(ppd);
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070014823 ppd->pstate = PLS_OFFLINE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014824 }
14825
14826 dd->link_default = HLS_DN_POLL;
14827
14828 /*
14829 * Do remaining PCIe setup and save PCIe values in dd.
14830 * Any error printing is already done by the init code.
14831 * On return, we have the chip mapped.
14832 */
Easwar Hariharan26ea2542016-10-17 04:19:58 -070014833 ret = hfi1_pcie_ddinit(dd, pdev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014834 if (ret < 0)
14835 goto bail_free;
14836
14837 /* verify that reads actually work, save revision for reset check */
14838 dd->revision = read_csr(dd, CCE_REVISION);
14839 if (dd->revision == ~(u64)0) {
14840 dd_dev_err(dd, "cannot read chip CSRs\n");
14841 ret = -EINVAL;
14842 goto bail_cleanup;
14843 }
14844 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14845 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14846 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14847 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14848
Jubin John4d114fd2016-02-14 20:21:43 -080014849 /*
Kaike Wan24487dd2016-02-26 13:33:23 -080014850 * Check interrupt registers mapping if the driver has no access to
14851 * the upstream component. In this case, it is likely that the driver
14852 * is running in a VM.
14853 */
14854 if (!parent) {
14855 ret = check_int_registers(dd);
14856 if (ret)
14857 goto bail_cleanup;
14858 }
14859
14860 /*
Jubin John4d114fd2016-02-14 20:21:43 -080014861 * obtain the hardware ID - NOT related to unit, which is a
14862 * software enumeration
14863 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014864 reg = read_csr(dd, CCE_REVISION2);
14865 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14866 & CCE_REVISION2_HFI_ID_MASK;
14867 /* the variable size will remove unwanted bits */
14868 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14869 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14870 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080014871 dd->icode < ARRAY_SIZE(inames) ?
14872 inames[dd->icode] : "unknown", (int)dd->irev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014873
14874 /* speeds the hardware can support */
14875 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14876 /* speeds allowed to run at */
14877 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14878 /* give a reasonable active value, will be set on link up */
14879 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14880
14881 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14882 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14883 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14884 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14885 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14886 /* fix up link widths for emulation _p */
14887 ppd = dd->pport;
14888 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14889 ppd->link_width_supported =
14890 ppd->link_width_enabled =
14891 ppd->link_width_downgrade_supported =
14892 ppd->link_width_downgrade_enabled =
14893 OPA_LINK_WIDTH_1X;
14894 }
14895	/* ensure num_vls isn't larger than the number of sdma engines */
14896 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14897 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014898 num_vls, dd->chip_sdma_engines);
14899 num_vls = dd->chip_sdma_engines;
14900 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014901 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014902 }
14903
14904 /*
14905 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14906 * Limit the max if larger than the field holds. If timeout is
14907 * non-zero, then the calculated field will be at least 1.
14908 *
14909 * Must be after icode is set up - the cclock rate depends
14910 * on knowing the hardware being used.
14911 */
14912 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14913 if (dd->rcv_intr_timeout_csr >
14914 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14915 dd->rcv_intr_timeout_csr =
14916 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14917 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14918 dd->rcv_intr_timeout_csr = 1;
14919
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014920 /* needs to be done before we look for the peer device */
14921 read_guid(dd);
14922
Dean Luick78eb1292016-03-05 08:49:45 -080014923 /* set up shared ASIC data with peer device */
14924 ret = init_asic_data(dd);
14925 if (ret)
14926 goto bail_cleanup;
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014927
Mike Marciniszyn77241052015-07-30 15:17:43 -040014928 /* obtain chip sizes, reset chip CSRs */
14929 init_chip(dd);
14930
14931 /* read in the PCIe link speed information */
14932 ret = pcie_speeds(dd);
14933 if (ret)
14934 goto bail_cleanup;
14935
Dean Luicke83eba22016-09-30 04:41:45 -070014936 /* call before get_platform_config(), after init_chip_resources() */
14937 ret = eprom_init(dd);
14938 if (ret)
14939 goto bail_free_rcverr;
14940
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014941 /* Needs to be called before hfi1_firmware_init */
14942 get_platform_config(dd);
14943
Mike Marciniszyn77241052015-07-30 15:17:43 -040014944 /* read in firmware */
14945 ret = hfi1_firmware_init(dd);
14946 if (ret)
14947 goto bail_cleanup;
14948
14949 /*
14950 * In general, the PCIe Gen3 transition must occur after the
14951 * chip has been idled (so it won't initiate any PCIe transactions
14952 * e.g. an interrupt) and before the driver changes any registers
14953 * (the transition will reset the registers).
14954 *
14955 * In particular, place this call after:
14956 * - init_chip() - the chip will not initiate any PCIe transactions
14957 * - pcie_speeds() - reads the current link speed
14958 * - hfi1_firmware_init() - the needed firmware is ready to be
14959 * downloaded
14960 */
14961 ret = do_pcie_gen3_transition(dd);
14962 if (ret)
14963 goto bail_cleanup;
14964
14965 /* start setting dd values and adjusting CSRs */
14966 init_early_variables(dd);
14967
14968 parse_platform_config(dd);
14969
Dean Luick5d9157a2015-11-16 21:59:34 -050014970 ret = obtain_boardname(dd);
14971 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014972 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014973
14974 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014975 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014976 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014977 (u32)dd->majrev,
14978 (u32)dd->minrev,
14979 (dd->revision >> CCE_REVISION_SW_SHIFT)
14980 & CCE_REVISION_SW_MASK);
14981
14982 ret = set_up_context_variables(dd);
14983 if (ret)
14984 goto bail_cleanup;
14985
14986 /* set initial RXE CSRs */
14987 init_rxe(dd);
14988 /* set initial TXE CSRs */
14989 init_txe(dd);
14990 /* set initial non-RXE, non-TXE CSRs */
14991 init_other(dd);
14992 /* set up KDETH QP prefix in both RX and TX CSRs */
14993 init_kdeth_qp(dd);
14994
Dennis Dalessandro41973442016-07-25 07:52:36 -070014995 ret = hfi1_dev_affinity_init(dd);
14996 if (ret)
14997 goto bail_cleanup;
Mitko Haralanov957558c2016-02-03 14:33:40 -080014998
Mike Marciniszyn77241052015-07-30 15:17:43 -040014999 /* send contexts must be set up before receive contexts */
15000 ret = init_send_contexts(dd);
15001 if (ret)
15002 goto bail_cleanup;
15003
15004 ret = hfi1_create_ctxts(dd);
15005 if (ret)
15006 goto bail_cleanup;
15007
15008 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
15009 /*
15010	 * rcd[0] is guaranteed to be valid by this point. Also, all
15011	 * contexts use the same value, as set by the module parameter.
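	 * rcvhdrqentsize is counted in dwords; the 64-bit RHF occupies the
	 * last two dwords of each entry, which is what the subtraction
	 * below computes.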
15012 */
15013 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
15014
15015 ret = init_pervl_scs(dd);
15016 if (ret)
15017 goto bail_cleanup;
15018
15019 /* sdma init */
15020 for (i = 0; i < dd->num_pports; ++i) {
15021 ret = sdma_init(dd, i);
15022 if (ret)
15023 goto bail_cleanup;
15024 }
15025
15026 /* use contexts created by hfi1_create_ctxts */
15027 ret = set_up_interrupts(dd);
15028 if (ret)
15029 goto bail_cleanup;
15030
15031 /* set up LCB access - must be after set_up_interrupts() */
15032 init_lcb_access(dd);
15033
Ira Weinyfc0b76c2016-07-27 21:09:40 -040015034 /*
15035 * Serial number is created from the base guid:
15036 * [27:24] = base guid [38:35]
15037 * [23: 0] = base guid [23: 0]
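	 * (a right shift of 11 moves GUID bits [38:35] down to [27:24])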
15038 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040015039 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
Ira Weinyfc0b76c2016-07-27 21:09:40 -040015040 (dd->base_guid & 0xFFFFFF) |
15041 ((dd->base_guid >> 11) & 0xF000000));
Mike Marciniszyn77241052015-07-30 15:17:43 -040015042
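	/*
	 * The OUI bytes are the top three bytes of the EUI-64 base GUID,
	 * i.e. the IEEE-assigned manufacturer prefix.
	 */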
15043 dd->oui1 = dd->base_guid >> 56 & 0xFF;
15044 dd->oui2 = dd->base_guid >> 48 & 0xFF;
15045 dd->oui3 = dd->base_guid >> 40 & 0xFF;
15046
15047 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15048 if (ret)
15049 goto bail_clear_intr;
Mike Marciniszyn77241052015-07-30 15:17:43 -040015050
15051 thermal_init(dd);
15052
15053 ret = init_cntrs(dd);
15054 if (ret)
15055 goto bail_clear_intr;
15056
15057 ret = init_rcverr(dd);
15058 if (ret)
15059 goto bail_free_cntrs;
15060
Tadeusz Strukacd7c8f2016-10-25 08:57:55 -070015061 init_completion(&dd->user_comp);
15062
15063	/* The user refcount starts at one to indicate an active device */
15064 atomic_set(&dd->user_refcount, 1);
15065
Mike Marciniszyn77241052015-07-30 15:17:43 -040015066 goto bail;
15067
15068bail_free_rcverr:
15069 free_rcverr(dd);
15070bail_free_cntrs:
15071 free_cntrs(dd);
15072bail_clear_intr:
15073 clean_up_interrupts(dd);
15074bail_cleanup:
15075 hfi1_pcie_ddcleanup(dd);
15076bail_free:
15077 hfi1_free_devdata(dd);
15078 dd = ERR_PTR(ret);
15079bail:
15080 return dd;
15081}
15082
15083static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15084 u32 dw_len)
15085{
15086 u32 delta_cycles;
15087 u32 current_egress_rate = ppd->current_egress_rate;
15088 /* rates here are in units of 10^6 bits/sec */
15089
15090 if (desired_egress_rate == -1)
15091 return 0; /* shouldn't happen */
15092
15093 if (desired_egress_rate >= current_egress_rate)
15094	return 0; /* we can't help it go faster, only slower */
15095
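	/*
	 * The extra delay is the time to send dw_len dwords (dw_len * 4
	 * bytes) at the slower desired rate minus the time at the current
	 * egress rate, both converted to cycles by egress_cycles().
	 */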
15096 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15097 egress_cycles(dw_len * 4, current_egress_rate);
15098
15099 return (u16)delta_cycles;
15100}
15101
Mike Marciniszyn77241052015-07-30 15:17:43 -040015102/**
15103 * create_pbc - build a pbc for transmission
15104	 * @flags: special case flags OR-ed into the built PBC
15105	 * @srate_mbs: static rate in Mbits/sec (0 means no rate pacing)
15106	 * @vl: virtual lane
15107	 * @dw_len: dword length (header words + data words + pbc words)
15108 *
15109 * Create a PBC with the given flags, rate, VL, and length.
15110 *
15111 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15112 * for verbs, which does not use this PSM feature. The lone other caller
15113 * is for the diagnostic interface which calls this if the user does not
15114 * supply their own PBC.
15115 */
15116u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15117 u32 dw_len)
15118{
15119 u64 pbc, delay = 0;
15120
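	/* a non-zero static rate requests pacing; compute the extra delay */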
15121 if (unlikely(srate_mbs))
15122 delay = delay_cycles(ppd, srate_mbs, dw_len);
15123
15124 pbc = flags
15125 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15126 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15127 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15128 | (dw_len & PBC_LENGTH_DWS_MASK)
15129 << PBC_LENGTH_DWS_SHIFT;
15130
15131 return pbc;
15132}
15133
15134#define SBUS_THERMAL 0x4f
15135#define SBUS_THERM_MONITOR_MODE 0x1
15136
15137#define THERM_FAILURE(dev, ret, reason) \
15138 dd_dev_err((dd), \
15139 "Thermal sensor initialization failed: %s (%d)\n", \
15140 (reason), (ret))
15141
15142/*
Jakub Pawlakcde10af2016-05-12 10:23:35 -070015143 * Initialize the thermal sensor.
Mike Marciniszyn77241052015-07-30 15:17:43 -040015144 *
15145	 * After initialization, enable polling of the thermal sensor through
15146	 * the SBus interface. For this to work, the SBus Master firmware
15147	 * must be loaded, because the HW polling logic uses SBus
15148	 * interrupts, which are not supported by the default firmware.
15149	 * Otherwise, no data will be returned through
15150 * the ASIC_STS_THERM CSR.
15151 */
15152static int thermal_init(struct hfi1_devdata *dd)
15153{
15154 int ret = 0;
15155
15156 if (dd->icode != ICODE_RTL_SILICON ||
Dean Luicka4536982016-03-05 08:50:11 -080015157 check_chip_resource(dd, CR_THERM_INIT, NULL))
Mike Marciniszyn77241052015-07-30 15:17:43 -040015158 return ret;
15159
Dean Luick576531f2016-03-05 08:50:01 -080015160 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15161 if (ret) {
15162 THERM_FAILURE(dd, ret, "Acquire SBus");
15163 return ret;
15164 }
15165
Mike Marciniszyn77241052015-07-30 15:17:43 -040015166 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050015167 /* Disable polling of thermal readings */
15168 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15169 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015170 /* Thermal Sensor Initialization */
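	/*
	 * Each sbus_request_slow() below addresses the thermal block
	 * (SBUS_THERMAL) and passes a data address, a command, and the
	 * data to write, in that argument order.
	 */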
15171 /* Step 1: Reset the Thermal SBus Receiver */
15172 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15173 RESET_SBUS_RECEIVER, 0);
15174 if (ret) {
15175 THERM_FAILURE(dd, ret, "Bus Reset");
15176 goto done;
15177 }
15178 /* Step 2: Set Reset bit in Thermal block */
15179 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15180 WRITE_SBUS_RECEIVER, 0x1);
15181 if (ret) {
15182 THERM_FAILURE(dd, ret, "Therm Block Reset");
15183 goto done;
15184 }
15185 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
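	/* (0x32 = 50, and 100MHz / 50 = 2MHz) */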
15186 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15187 WRITE_SBUS_RECEIVER, 0x32);
15188 if (ret) {
15189 THERM_FAILURE(dd, ret, "Write Clock Div");
15190 goto done;
15191 }
15192 /* Step 4: Select temperature mode */
15193 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15194 WRITE_SBUS_RECEIVER,
15195 SBUS_THERM_MONITOR_MODE);
15196 if (ret) {
15197 THERM_FAILURE(dd, ret, "Write Mode Sel");
15198 goto done;
15199 }
15200 /* Step 5: De-assert block reset and start conversion */
15201 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15202 WRITE_SBUS_RECEIVER, 0x2);
15203 if (ret) {
15204 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15205 goto done;
15206 }
15207 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
15208 msleep(22);
15209
15210 /* Enable polling of thermal readings */
15211 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
Dean Luicka4536982016-03-05 08:50:11 -080015212
15213 /* Set initialized flag */
15214 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15215 if (ret)
15216 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15217
Mike Marciniszyn77241052015-07-30 15:17:43 -040015218done:
Dean Luick576531f2016-03-05 08:50:01 -080015219 release_chip_resource(dd, CR_SBUS);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015220 return ret;
15221}
15222
15223static void handle_temp_err(struct hfi1_devdata *dd)
15224{
15225 struct hfi1_pportdata *ppd = &dd->pport[0];
15226 /*
15227 * Thermal Critical Interrupt
15228	 * Put the device into forced freeze mode, take the link down to
15229	 * offline, and put the DC into reset.
15230 */
15231 dd_dev_emerg(dd,
15232 "Critical temperature reached! Forcing device into freeze mode!\n");
15233 dd->flags |= HFI1_FORCED_FREEZE;
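	/*
	 * FREEZE_SELF: software initiates the SPC freeze itself;
	 * FREEZE_ABORT: skip the normal freeze recovery since the device
	 * is being shut down (interpretation of the flag names; see
	 * start_freeze_handling() for the details).
	 */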
Jubin John8638b772016-02-14 20:19:24 -080015234 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015235 /*
15236 * Shut DC down as much and as quickly as possible.
15237 *
15238 * Step 1: Take the link down to OFFLINE. This will cause the
15239 * 8051 to put the Serdes in reset. However, we don't want to
15240 * go through the entire link state machine since we want to
15241	 * shut down ASAP. Furthermore, this is not a graceful shutdown
15242 * but rather an attempt to save the chip.
15243 * Code below is almost the same as quiet_serdes() but avoids
15244 * all the extra work and the sleeps.
15245 */
15246 ppd->driver_link_ready = 0;
15247 ppd->link_enabled = 0;
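	/*
	 * The argument packs the linkdown reason into bits 15:8 above the
	 * requested physical link state in bits 7:0, as the shift below
	 * suggests; the exact encoding is defined with the 8051 host
	 * commands.
	 */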
Harish Chegondibf640092016-03-05 08:49:29 -080015248 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15249 PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015250 /*
15251 * Step 2: Shutdown LCB and 8051
15252 * After shutdown, do not restore DC_CFG_RESET value.
15253 */
15254 dc_shutdown(dd);
15255}