/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
#include "affinity.h"
#include "debugfs.h"

69#define NUM_IB_PORTS 1
70
71uint kdeth_qp;
72module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
73MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
74
75uint num_vls = HFI1_MAX_VLS_SUPPORTED;
76module_param(num_vls, uint, S_IRUGO);
77MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
78
79/*
80 * Default time to aggregate two 10K packets from the idle state
81 * (timer not running). The timer starts at the end of the first packet,
82 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
85 */
86uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
87module_param(rcv_intr_timeout, uint, S_IRUGO);
88MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
89
90uint rcv_intr_count = 16; /* same as qib */
91module_param(rcv_intr_count, uint, S_IRUGO);
92MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
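
/*
 * Note: the two mitigation knobs above work together.  As a rough sketch
 * of the intended behavior (the authoritative logic lives in the receive
 * interrupt handling later in this file), an interrupt is raised once
 * either rcv_intr_count packets have arrived or rcv_intr_timeout ns have
 * elapsed since the first unreported packet, whichever happens first.
 */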
93
94ushort link_crc_mask = SUPPORTED_CRCS;
95module_param(link_crc_mask, ushort, S_IRUGO);
96MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
97
98uint loopback;
99module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
101
102/* Other driver tunables */
103uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/
104static ushort crc_14b_sideband = 1;
105static uint use_flr = 1;
106uint quick_linkup; /* skip LNI */
107
108struct flag_table {
109 u64 flag; /* the flag */
110 char *str; /* description string */
111 u16 extra; /* extra information */
112 u16 unused0;
113 u32 unused1;
114};
115
116/* str must be a string constant */
117#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
118#define FLAG_ENTRY0(str, flag) {flag, str, 0}
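
/*
 * Illustrative expansion (not part of the driver logic): given the
 * struct flag_table layout above,
 *   FLAG_ENTRY("PioCsrParity", SEC_SPC_FREEZE, <smask>)
 * becomes the initializer { <smask>, "PioCsrParity", SEC_SPC_FREEZE },
 * i.e. the flag first, then the description string, then the "extra" field.
 */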
119
120/* Send Error Consequences */
121#define SEC_WRITE_DROPPED 0x1
122#define SEC_PACKET_DROPPED 0x2
123#define SEC_SC_HALTED 0x4 /* per-context only */
124#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
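
/*
 * The SEC_* values above are carried in the "extra" member of the
 * pio_err_status_flags[] and sc_err_status_flags[] entries below; the
 * send error handlers use them to decide what a given error implies
 * (a dropped write/packet, a halted send context, or an SPC freeze).
 */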
125
#define DEFAULT_KRCVQS		  2
#define MIN_KERNEL_KCTXTS         2
#define FIRST_KERNEL_KCTXT        1

/*
 * RSM instance allocation
 *   0 - Verbs
 *   1 - User Fecn Handling
 *   2 - Vnic
 */
#define RSM_INS_VERBS             0
#define RSM_INS_FECN              1
#define RSM_INS_VNIC              2

140/* Bit offset into the GUID which carries HFI id information */
141#define GUID_HFI_INDEX_SHIFT 39
142
143/* extract the emulation revision */
144#define emulator_rev(dd) ((dd)->irev >> 8)
145/* parallel and serial emulation versions are 3 and 4 respectively */
146#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
147#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
148
/* RSM fields for Verbs */
/* packet type */
151#define IB_PACKET_TYPE 2ull
152#define QW_SHIFT 6ull
153/* QPN[7..1] */
154#define QPN_WIDTH 7ull
155
156/* LRH.BTH: QW 0, OFFSET 48 - for match */
157#define LRH_BTH_QW 0ull
158#define LRH_BTH_BIT_OFFSET 48ull
159#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
160#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
161#define LRH_BTH_SELECT
162#define LRH_BTH_MASK 3ull
163#define LRH_BTH_VALUE 2ull
164
165/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
166#define LRH_SC_QW 0ull
167#define LRH_SC_BIT_OFFSET 56ull
168#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
169#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
170#define LRH_SC_MASK 128ull
171#define LRH_SC_VALUE 0ull
172
173/* SC[n..0] QW 0, OFFSET 60 - for select */
174#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
175
176/* QPN[m+n:1] QW 1, OFFSET 1 */
177#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
178
/* RSM fields for Vnic */
180/* L2_TYPE: QW 0, OFFSET 61 - for match */
181#define L2_TYPE_QW 0ull
182#define L2_TYPE_BIT_OFFSET 61ull
183#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
184#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
185#define L2_TYPE_MASK 3ull
186#define L2_16B_VALUE 2ull
187
188/* L4_TYPE QW 1, OFFSET 0 - for match */
189#define L4_TYPE_QW 1ull
190#define L4_TYPE_BIT_OFFSET 0ull
191#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
192#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
193#define L4_16B_TYPE_MASK 0xFFull
194#define L4_16B_ETH_VALUE 0x78ull
195
196/* 16B VESWID - for select */
197#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
198/* 16B ENTROPY - for select */
199#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))
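
/*
 * For reference: each *_OFFSET value above packs a quadword index and a
 * bit offset within that quadword as ((qw << QW_SHIFT) | bit).  For
 * example, LRH_BTH_MATCH_OFFSET is (0 << 6) | 48 = 48, i.e. bit 48 of
 * quadword 0.  These encoded positions are what the RSM match/select
 * programming later in this file expects.
 */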
200
/* defines to build power on SC2VL table */
202#define SC2VL_VAL( \
203 num, \
204 sc0, sc0val, \
205 sc1, sc1val, \
206 sc2, sc2val, \
207 sc3, sc3val, \
208 sc4, sc4val, \
209 sc5, sc5val, \
210 sc6, sc6val, \
211 sc7, sc7val) \
212( \
213 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
214 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
215 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
216 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
217 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
218 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
219 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
220 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
221)
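
/*
 * Sketch of intended use: SC2VL_VAL(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0,
 * 5, 0, 6, 0, 7, 0) composes the 64-bit value for the SEND_SC2VLT0 CSR
 * by shifting each scNval into that SC's field, here mapping SCs 0-7 to
 * VL 0.  DC_SC_VL_VAL below does the same for the DCC_CFG_SC_VL_TABLE
 * registers, sixteen entries at a time.
 */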
222
223#define DC_SC_VL_VAL( \
224 range, \
225 e0, e0val, \
226 e1, e1val, \
227 e2, e2val, \
228 e3, e3val, \
229 e4, e4val, \
230 e5, e5val, \
231 e6, e6val, \
232 e7, e7val, \
233 e8, e8val, \
234 e9, e9val, \
235 e10, e10val, \
236 e11, e11val, \
237 e12, e12val, \
238 e13, e13val, \
239 e14, e14val, \
240 e15, e15val) \
241( \
242 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
243 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
244 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
245 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
246 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
247 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
248 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
249 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
250 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
251 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
252 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
253 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
254 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
255 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
256 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
257 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
258)
259
260/* all CceStatus sub-block freeze bits */
261#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
262 | CCE_STATUS_RXE_FROZE_SMASK \
263 | CCE_STATUS_TXE_FROZE_SMASK \
264 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
265/* all CceStatus sub-block TXE pause bits */
266#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
267 | CCE_STATUS_TXE_PAUSED_SMASK \
268 | CCE_STATUS_SDMA_PAUSED_SMASK)
269/* all CceStatus sub-block RXE pause bits */
270#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
271
#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
273#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
274
/*
276 * CCE Error flags.
277 */
278static struct flag_table cce_err_status_flags[] = {
279/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
280 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
281/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
282 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
283/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
284 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
285/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
286 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
287/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
288 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
289/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
290 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
291/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
292 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
293/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
294 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
295/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
296 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
297/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
298 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
300 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
301/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
302 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
303/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
304 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
305/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
306 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
308 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
309/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
310 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
	    CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
314 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
315/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
316 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
317/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
318 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
319/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
320 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
321/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
322 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
323/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
324 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
325/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
326 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
327/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
328 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
329/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
330 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
331/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
332 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
333/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
334 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
335/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
336 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
337/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
338 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
339/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
340 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
341/*31*/ FLAG_ENTRY0("LATriggered",
342 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
343/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
344 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
345/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
346 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
347/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
348 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
349/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
350 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
351/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
352 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
353/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
354 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
355/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
356 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
357/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
358 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
359/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
360 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
361/*41-63 reserved*/
362};
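
/*
 * The flag_table[] arrays above and below are pure data; a small helper
 * later in this file walks a table and concatenates the strings for every
 * bit set in a status value, which is how these error names end up in the
 * driver's log messages.
 */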
363
364/*
365 * Misc Error flags
366 */
367#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
368static struct flag_table misc_err_status_flags[] = {
369/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
370/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
371/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
372/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
373/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
374/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
375/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
376/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
377/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
378/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
379/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
380/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
381/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
382};
383
384/*
385 * TXE PIO Error flags and consequences
386 */
387static struct flag_table pio_err_status_flags[] = {
388/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
389 SEC_WRITE_DROPPED,
390 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
391/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
392 SEC_SPC_FREEZE,
393 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
394/* 2*/ FLAG_ENTRY("PioCsrParity",
395 SEC_SPC_FREEZE,
396 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
397/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
398 SEC_SPC_FREEZE,
399 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
400/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
401 SEC_SPC_FREEZE,
402 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
403/* 5*/ FLAG_ENTRY("PioPccFifoParity",
404 SEC_SPC_FREEZE,
405 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
406/* 6*/ FLAG_ENTRY("PioPecFifoParity",
407 SEC_SPC_FREEZE,
408 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
409/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
410 SEC_SPC_FREEZE,
411 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
412/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
413 SEC_SPC_FREEZE,
414 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
415/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
416 SEC_SPC_FREEZE,
417 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
418/*10*/ FLAG_ENTRY("PioSmPktResetParity",
419 SEC_SPC_FREEZE,
420 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
421/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
422 SEC_SPC_FREEZE,
423 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
424/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
425 SEC_SPC_FREEZE,
426 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
427/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
428 0,
429 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
430/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
431 0,
432 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
433/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
434 SEC_SPC_FREEZE,
435 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
436/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
437 SEC_SPC_FREEZE,
438 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
439/*17*/ FLAG_ENTRY("PioInitSmIn",
440 0,
441 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
442/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
443 SEC_SPC_FREEZE,
444 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
445/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
446 SEC_SPC_FREEZE,
447 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
448/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
449 0,
450 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
451/*21*/ FLAG_ENTRY("PioWriteDataParity",
452 SEC_SPC_FREEZE,
453 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
454/*22*/ FLAG_ENTRY("PioStateMachine",
455 SEC_SPC_FREEZE,
456 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
457/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
463/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
464 SEC_SPC_FREEZE,
465 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
466/*26*/ FLAG_ENTRY("PioVlfSopParity",
467 SEC_SPC_FREEZE,
468 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
469/*27*/ FLAG_ENTRY("PioVlFifoParity",
470 SEC_SPC_FREEZE,
471 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
472/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
473 SEC_SPC_FREEZE,
474 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
475/*29*/ FLAG_ENTRY("PioPpmcSopLen",
476 SEC_SPC_FREEZE,
477 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
478/*30-31 reserved*/
479/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
480 SEC_SPC_FREEZE,
481 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
482/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
483 SEC_SPC_FREEZE,
484 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
485/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
486 SEC_SPC_FREEZE,
487 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
488/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
489 SEC_SPC_FREEZE,
490 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
491/*36-63 reserved*/
492};
493
494/* TXE PIO errors that cause an SPC freeze */
495#define ALL_PIO_FREEZE_ERR \
496 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
497 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
498 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
499 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
500 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
501 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
502 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
503 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
504 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
505 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
506 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
507 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
508 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
509 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
510 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
511 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
512 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
513 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
514 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
515 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
516 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
517 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
518 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
519 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
520 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
521 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
522 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
523 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
524 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
525
526/*
527 * TXE SDMA Error flags
528 */
529static struct flag_table sdma_err_status_flags[] = {
530/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
531 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
532/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
533 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
534/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
535 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
536/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
537 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
538/*04-63 reserved*/
539};
540
541/* TXE SDMA errors that cause an SPC freeze */
542#define ALL_SDMA_FREEZE_ERR \
543 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
544 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
545 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
546
/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
548#define PORT_DISCARD_EGRESS_ERRS \
549 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
550 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
551 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
552
/*
554 * TXE Egress Error flags
555 */
556#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
557static struct flag_table egress_err_status_flags[] = {
558/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
559/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
560/* 2 reserved */
561/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
562 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
563/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
564/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
565/* 6 reserved */
566/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
567 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
568/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
569 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
570/* 9-10 reserved */
571/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
572 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
573/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
574/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
575/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
576/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
577/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
578 SEES(TX_SDMA0_DISALLOWED_PACKET)),
579/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
580 SEES(TX_SDMA1_DISALLOWED_PACKET)),
581/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
582 SEES(TX_SDMA2_DISALLOWED_PACKET)),
583/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
584 SEES(TX_SDMA3_DISALLOWED_PACKET)),
585/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
586 SEES(TX_SDMA4_DISALLOWED_PACKET)),
587/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
588 SEES(TX_SDMA5_DISALLOWED_PACKET)),
589/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
590 SEES(TX_SDMA6_DISALLOWED_PACKET)),
591/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
592 SEES(TX_SDMA7_DISALLOWED_PACKET)),
593/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
594 SEES(TX_SDMA8_DISALLOWED_PACKET)),
595/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
596 SEES(TX_SDMA9_DISALLOWED_PACKET)),
597/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
598 SEES(TX_SDMA10_DISALLOWED_PACKET)),
599/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
600 SEES(TX_SDMA11_DISALLOWED_PACKET)),
601/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
602 SEES(TX_SDMA12_DISALLOWED_PACKET)),
603/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
604 SEES(TX_SDMA13_DISALLOWED_PACKET)),
605/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
606 SEES(TX_SDMA14_DISALLOWED_PACKET)),
607/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
608 SEES(TX_SDMA15_DISALLOWED_PACKET)),
609/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
610 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
611/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
612 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
613/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
614 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
615/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
616 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
617/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
618 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
619/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
620 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
621/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
622 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
623/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
624 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
625/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
626 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
627/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
628/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
629/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
630/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
631/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
632/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
633/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
634/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
635/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
636/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
637/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
638/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
639/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
640/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
641/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
642/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
643/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
644/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
645/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
646/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
647/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
648/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
649 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
650/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
651 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
652};
653
654/*
655 * TXE Egress Error Info flags
656 */
657#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
658static struct flag_table egress_err_info_flags[] = {
659/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
660/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
661/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
662/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
663/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
664/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
665/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
666/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
667/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
668/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
669/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
670/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
671/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
672/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
673/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
674/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
675/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
676/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
677/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
678/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
679/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
680/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
681};
682
683/* TXE Egress errors that cause an SPC freeze */
684#define ALL_TXE_EGRESS_FREEZE_ERR \
685 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
686 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
687 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
688 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
689 | SEES(TX_LAUNCH_CSR_PARITY) \
690 | SEES(TX_SBRD_CTL_CSR_PARITY) \
691 | SEES(TX_CONFIG_PARITY) \
692 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
693 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
694 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
695 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
696 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
697 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
698 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
699 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
700 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
701 | SEES(TX_CREDIT_RETURN_PARITY))
702
703/*
704 * TXE Send error flags
705 */
706#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
707static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
710/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
711};
712
713/*
714 * TXE Send Context Error flags and consequences
715 */
716static struct flag_table sc_err_status_flags[] = {
717/* 0*/ FLAG_ENTRY("InconsistentSop",
718 SEC_PACKET_DROPPED | SEC_SC_HALTED,
719 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
720/* 1*/ FLAG_ENTRY("DisallowedPacket",
721 SEC_PACKET_DROPPED | SEC_SC_HALTED,
722 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
723/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
724 SEC_WRITE_DROPPED | SEC_SC_HALTED,
725 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
726/* 3*/ FLAG_ENTRY("WriteOverflow",
727 SEC_WRITE_DROPPED | SEC_SC_HALTED,
728 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
729/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
730 SEC_WRITE_DROPPED | SEC_SC_HALTED,
731 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
732/* 5-63 reserved*/
733};
734
735/*
736 * RXE Receive Error flags
737 */
738#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
739static struct flag_table rxe_err_status_flags[] = {
740/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
741/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
742/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
743/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
744/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
745/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
746/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
747/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
748/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
749/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
750/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
751/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
752/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
753/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
754/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
755/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
756/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
757 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
758/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
759/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
760/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
761 RXES(RBUF_BLOCK_LIST_READ_UNC)),
762/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
763 RXES(RBUF_BLOCK_LIST_READ_COR)),
764/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
765 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
766/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
767 RXES(RBUF_CSR_QENT_CNT_PARITY)),
768/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
769 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
770/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
771 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
772/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
773/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
774/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
775 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
776/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
777/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
778/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
779/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
780/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
781/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
782/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
783/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
784 RXES(RBUF_FL_INITDONE_PARITY)),
785/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
786 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
787/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
788/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
789/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
790/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
791 RXES(LOOKUP_DES_PART1_UNC_COR)),
792/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
793 RXES(LOOKUP_DES_PART2_PARITY)),
794/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
795/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
796/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
797/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
798/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
799/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
800/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
801/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
802/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
803/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
804/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
805/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
806/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
807/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
808/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
809/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
810/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
811/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
812/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
813/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
814/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
815/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
816};
817
818/* RXE errors that will trigger an SPC freeze */
819#define ALL_RXE_FREEZE_ERR \
820 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
824 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
825 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
826 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
827 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
828 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
829 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
830 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
831 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
832 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
833 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
834 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
835 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
836 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
837 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
838 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
839 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
840 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
841 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
842 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
843 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
844 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
845 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
846 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
847 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
848 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
849 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
850 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
851 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
852 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
853 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
854 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
855 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
856 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
857 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
858 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
859 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
860 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
861 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
862 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
863 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
864
865#define RXE_FREEZE_ABORT_MASK \
866 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
867 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
868 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
869
870/*
871 * DCC Error Flags
872 */
873#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
874static struct flag_table dcc_err_flags[] = {
875 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
876 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
877 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
878 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
879 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
880 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
881 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
882 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
883 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
884 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
885 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
886 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
887 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
888 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
889 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
890 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
891 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
892 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
893 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
894 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
895 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
896 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
897 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
898 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
899 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
900 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
901 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
902 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
903 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
904 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
905 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
906 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
907 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
908 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
909 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
910 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
911 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
912 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
913 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
914 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
915 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
916 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
917 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
918 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
919 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
920 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
921};
922
923/*
924 * LCB error flags
925 */
926#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
927static struct flag_table lcb_err_flags[] = {
928/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
929/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
930/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
931/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
932 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
933/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
934/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
935/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
936/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
937/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
938/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
939/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
940/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
941/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
942/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
943 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
944/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
945/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
946/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
947/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
948/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
949/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
950 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
951/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
952/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
953/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
954/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
955/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
956/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
957/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
958 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
959/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
960/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
961 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
962/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
963 LCBE(REDUNDANT_FLIT_PARITY_ERR))
964};
965
966/*
967 * DC8051 Error Flags
968 */
969#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
970static struct flag_table dc8051_err_flags[] = {
971 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
972 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
973 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
974 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
975 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
976 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
977 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
978 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
979 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
982};
983
984/*
985 * DC8051 Information Error flags
986 *
987 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
988 */
989static struct flag_table dc8051_info_err_flags[] = {
990 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
991 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
992 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
993 FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
996 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
997 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
998 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
999 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
1000 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1001 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};
1007
1008/*
1009 * DC8051 Information Host Information flags
1010 *
1011 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1012 */
1013static struct flag_table dc8051_info_host_msg_flags[] = {
1014 FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1018 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1019 FLAG_ENTRY0("External device config request", 0x0020),
1020 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1021 FLAG_ENTRY0("LinkUp achieved", 0x0080),
1022 FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};

static u32 encoded_size(u32 size);
1027static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1028static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1029static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1030 u8 *continuous);
1031static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1032 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1033static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1034 u8 *remote_tx_rate, u16 *link_widths);
1035static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
1036 u8 *flag_bits, u16 *link_widths);
1037static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1038 u8 *device_rev);
1039static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1040static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1041static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1042 u8 *tx_polarity_inversion,
1043 u8 *rx_polarity_inversion, u8 *max_rate);
1044static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1045 unsigned int context, u64 err_status);
1046static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1047static void handle_dcc_err(struct hfi1_devdata *dd,
1048 unsigned int context, u64 err_status);
1049static void handle_lcb_err(struct hfi1_devdata *dd,
1050 unsigned int context, u64 err_status);
1051static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1058static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
1061static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1062 u32 state);
1063static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1064 u64 *out_data);
1065static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1066static int thermal_init(struct hfi1_devdata *dd);
1067
static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);

1084/*
1085 * Error interrupt table entry. This is used as input to the interrupt
1086 * "clear down" routine used for all second tier error interrupt register.
1087 * Second tier interrupt registers have a single bit representing them
1088 * in the top-level CceIntStatus.
1089 */
1090struct err_reg_info {
1091 u32 status; /* status CSR offset */
1092 u32 clear; /* clear CSR offset */
1093 u32 mask; /* mask CSR offset */
1094 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1095 const char *desc;
1096};
1097
1098#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1099#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1100#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1101
1102/*
1103 * Helpers for building HFI and DC error interrupt table entries. Different
1104 * helpers are needed because of inconsistent register names.
1105 */
1106#define EE(reg, handler, desc) \
1107 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1108 handler, desc }
1109#define DC_EE1(reg, handler, desc) \
1110 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1111#define DC_EE2(reg, handler, desc) \
1112 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
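
/*
 * Illustrative expansion: EE(CCE_ERR, handle_cce_err, "CceErr") becomes
 * { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" },
 * matching the status/clear/mask/handler/desc layout of struct
 * err_reg_info above.  DC_EE1/DC_EE2 exist only because the DC blocks
 * name the same three registers differently.
 */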
1113
1114/*
1115 * Table of the "misc" grouping of error interrupts. Each entry refers to
1116 * another register containing more information.
1117 */
1118static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1119/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1120/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1121/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1122/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1123/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1124/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1125/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1126/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1127 /* the rest are reserved */
1128};
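
/*
 * Position in this table matters: as a sketch of how it is consumed,
 * entry N is looked up for bit (IS_GENERAL_ERR_START + N) of the
 * top-level CceIntStatus, which is how the generic "clear down" code
 * finds the matching second-tier status/clear/mask registers.
 */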
1129
1130/*
1131 * Index into the Various section of the interrupt sources
1132 * corresponding to the Critical Temperature interrupt.
1133 */
1134#define TCRIT_INT_SOURCE 4
1135
1136/*
1137 * SDMA error interrupt entry - refers to another register containing more
1138 * information.
1139 */
1140static const struct err_reg_info sdma_eng_err =
1141 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1142
1143static const struct err_reg_info various_err[NUM_VARIOUS] = {
1144/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1145/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1146/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1147/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1148/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1149 /* rest are reserved */
1150};
1151
1152/*
1153 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1154 * register can not be derived from the MTU value because 10K is not
1155 * a power of 2. Therefore, we need a constant. Everything else can
1156 * be calculated.
1157 */
1158#define DCC_CFG_PORT_MTU_CAP_10240 7
1159
1160/*
1161 * Table of the DC grouping of error interrupts. Each entry refers to
1162 * another register containing more information.
1163 */
1164static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1165/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1166/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1167/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1168/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1169 /* the rest are reserved */
1170};
1171
1172struct cntr_entry {
1173 /*
1174 * counter name
1175 */
1176 char *name;
1177
1178 /*
1179 * csr to read for name (if applicable)
1180 */
1181 u64 csr;
1182
1183 /*
1184 * offset into dd or ppd to store the counter's value
1185 */
1186 int offset;
1187
1188 /*
1189 * flags
1190 */
1191 u8 flags;
1192
1193 /*
1194 * accessor for stat element, context either dd or ppd
1195 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};
1199
1200#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1201#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1202
1203#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1204{ \
1205 name, \
1206 csr, \
1207 offset, \
1208 flags, \
1209 accessor \
1210}
1211
1212/* 32bit RXE */
1213#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1214CNTR_ELEM(#name, \
1215 (counter * 8 + RCV_COUNTER_ARRAY32), \
1216 0, flags | CNTR_32BIT, \
1217 port_access_u32_csr)
1218
1219#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1220CNTR_ELEM(#name, \
1221 (counter * 8 + RCV_COUNTER_ARRAY32), \
1222 0, flags | CNTR_32BIT, \
1223 dev_access_u32_csr)
1224
1225/* 64bit RXE */
1226#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1227CNTR_ELEM(#name, \
1228 (counter * 8 + RCV_COUNTER_ARRAY64), \
1229 0, flags, \
1230 port_access_u64_csr)
1231
1232#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1233CNTR_ELEM(#name, \
1234 (counter * 8 + RCV_COUNTER_ARRAY64), \
1235 0, flags, \
1236 dev_access_u64_csr)
1237
1238#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1239#define OVR_ELM(ctx) \
1240CNTR_ELEM("RcvHdrOvr" #ctx, \
1241 (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1242 0, CNTR_NORMAL, port_access_u64_csr)
1243
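/*
 * Illustration only: OVR_ELM(0) produces
 *
 *	CNTR_ELEM("RcvHdrOvr0", RCV_HDR_OVFL_CNT + 0 * 0x100,
 *		  0, CNTR_NORMAL, port_access_u64_csr)
 *
 * i.e. each receive context's RcvHdrOvfl counter sits 0x100 bytes past
 * the previous context's, and OVR_LBL() names the matching counter index
 * (C_RCV_HDR_OVF_0 through C_RCV_HDR_OVF_159 above).
 */
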
1244/* 32bit TXE */
1245#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1246CNTR_ELEM(#name, \
1247 (counter * 8 + SEND_COUNTER_ARRAY32), \
1248 0, flags | CNTR_32BIT, \
1249 port_access_u32_csr)
1250
1251/* 64bit TXE */
1252#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1253CNTR_ELEM(#name, \
1254 (counter * 8 + SEND_COUNTER_ARRAY64), \
1255 0, flags, \
1256 port_access_u64_csr)
1257
1258# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1259CNTR_ELEM(#name,\
1260 counter * 8 + SEND_COUNTER_ARRAY64, \
1261 0, \
1262 flags, \
1263 dev_access_u64_csr)
1264
1265/* CCE */
1266#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1267CNTR_ELEM(#name, \
1268 (counter * 8 + CCE_COUNTER_ARRAY32), \
1269 0, flags | CNTR_32BIT, \
1270 dev_access_u32_csr)
1271
1272#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1273CNTR_ELEM(#name, \
1274 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1275 0, flags | CNTR_32BIT, \
1276 dev_access_u32_csr)
1277
1278/* DC */
1279#define DC_PERF_CNTR(name, counter, flags) \
1280CNTR_ELEM(#name, \
1281 counter, \
1282 0, \
1283 flags, \
1284 dev_access_u64_csr)
1285
1286#define DC_PERF_CNTR_LCB(name, counter, flags) \
1287CNTR_ELEM(#name, \
1288 counter, \
1289 0, \
1290 flags, \
1291 dc_access_lcb_cntr)
1292
1293/* ibp counters */
1294#define SW_IBP_CNTR(name, cntr) \
1295CNTR_ELEM(#name, \
1296 0, \
1297 0, \
1298 CNTR_SYNTH, \
1299 access_ibp_##cntr)
1300
1301/**
1302 * hfi1_addr_from_offset - return addr for readq/writeq
1303 * @dd - the dd device
1304 * @offset - the offset of the CSR within bar0
1305 *
1306 * This routine selects the appropriate base address
1307 * based on the indicated offset.
1308 */
1309static inline void __iomem *hfi1_addr_from_offset(
1310 const struct hfi1_devdata *dd,
1311 u32 offset)
1312{
1313 if (offset >= dd->base2_start)
1314 return dd->kregbase2 + (offset - dd->base2_start);
1315 return dd->kregbase1 + offset;
1316}
1317
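/*
 * Illustration only: with the split mapping above, an offset below
 * dd->base2_start is reached through kregbase1 at its raw offset, while
 * anything at or above base2_start is rebased into the second mapping:
 *
 *	addr = dd->kregbase2 + (offset - dd->base2_start);
 *
 * so callers of read_csr()/write_csr() never need to know which of the
 * two mapped BAR0 regions a given CSR falls in.
 */
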
1318/**
1319 * read_csr - read CSR at the indicated offset
1320 * @dd - the dd device
1321 * @offset - the offset of the CSR within bar0
1322 *
1323 * Return: the value read or all FF's if there
1324 * is no mapping
1325 */
1326u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1327{
1328 if (dd->flags & HFI1_PRESENT)
1329 return readq(hfi1_addr_from_offset(dd, offset));
1330 return -1;
1331}
1332
1333/**
1334 * write_csr - write CSR at the indicated offset
1335 * @dd - the dd device
1336 * @offset - the offset of the CSR within bar0
1337 * @value - value to write
1338 */
1339void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1340{
1341 if (dd->flags & HFI1_PRESENT) {
1342 void __iomem *base = hfi1_addr_from_offset(dd, offset);
1343
1344 /* avoid write to RcvArray */
1345 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1346 return;
1347 writeq(value, base);
1348 }
1349}
1350
1351/**
1352 * get_csr_addr - return the iomem address for offset
1353 * @dd - the dd device
1354 * @offset - the offset of the CSR within bar0
1355 *
1356 * Return: The iomem address to use in subsequent
1357 * writeq/readq operations.
1358 */
1359void __iomem *get_csr_addr(
1360 const struct hfi1_devdata *dd,
1361 u32 offset)
1362{
1363 if (dd->flags & HFI1_PRESENT)
1364 return hfi1_addr_from_offset(dd, offset);
1365 return NULL;
1366}
1367
1368static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1369 int mode, u64 value)
1370{
1371 u64 ret;
1372
1373 if (mode == CNTR_MODE_R) {
1374 ret = read_csr(dd, csr);
1375 } else if (mode == CNTR_MODE_W) {
1376 write_csr(dd, csr, value);
1377 ret = value;
1378 } else {
1379 dd_dev_err(dd, "Invalid cntr register access mode");
1380 return 0;
1381 }
1382
1383 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1384 return ret;
1385}
1386
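/*
 * Illustration only (hypothetical caller): counter accessors read with
 * CNTR_MODE_R and clear by writing zero with CNTR_MODE_W, e.g.
 *
 *	u64 val = read_write_csr(dd, csr, CNTR_MODE_R, 0);
 *	(void)read_write_csr(dd, csr, CNTR_MODE_W, 0);
 *
 * Any other mode is rejected and logged via dd_dev_err().
 */
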
1387/* Dev Access */
1388static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1389 void *context, int vl, int mode, u64 data)
1390{
1391 struct hfi1_devdata *dd = context;
1392 u64 csr = entry->csr;
1393
1394 if (entry->flags & CNTR_SDMA) {
1395 if (vl == CNTR_INVALID_VL)
1396 return 0;
1397 csr += 0x100 * vl;
1398 } else {
1399 if (vl != CNTR_INVALID_VL)
1400 return 0;
1401 }
1402 return read_write_csr(dd, csr, mode, data);
1403}
1404
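/*
 * Note (illustration only): for CNTR_SDMA entries above, "vl" is reused
 * as the SDMA engine index and the per-engine copy of the counter is
 * read at entry->csr + 0x100 * vl; non-SDMA entries accept only
 * CNTR_INVALID_VL.
 */
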
1405static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1406 void *context, int idx, int mode, u64 data)
1407{
1408 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1409
1410 if (dd->per_sdma && idx < dd->num_sdma)
1411 return dd->per_sdma[idx].err_cnt;
1412 return 0;
1413}
1414
1415static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1416 void *context, int idx, int mode, u64 data)
1417{
1418 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1419
1420 if (dd->per_sdma && idx < dd->num_sdma)
1421 return dd->per_sdma[idx].sdma_int_cnt;
1422 return 0;
1423}
1424
1425static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1426 void *context, int idx, int mode, u64 data)
1427{
1428 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1429
1430 if (dd->per_sdma && idx < dd->num_sdma)
1431 return dd->per_sdma[idx].idle_int_cnt;
1432 return 0;
1433}
1434
1435static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1436 void *context, int idx, int mode,
1437 u64 data)
1438{
1439 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1440
1441 if (dd->per_sdma && idx < dd->num_sdma)
1442 return dd->per_sdma[idx].progress_int_cnt;
1443 return 0;
1444}
1445
1446static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1447 int vl, int mode, u64 data)
1448{
1449 struct hfi1_devdata *dd = context;
1450
1451 u64 val = 0;
1452 u64 csr = entry->csr;
1453
1454 if (entry->flags & CNTR_VL) {
1455 if (vl == CNTR_INVALID_VL)
1456 return 0;
1457 csr += 8 * vl;
1458 } else {
1459 if (vl != CNTR_INVALID_VL)
1460 return 0;
1461 }
1462
1463 val = read_write_csr(dd, csr, mode, data);
1464 return val;
1465}
1466
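/*
 * Note (illustration only): CNTR_VL entries keep one 64-bit CSR per VL,
 * so dev_access_u64_csr() reads VL "vl" at entry->csr + 8 * vl; entries
 * without CNTR_VL accept only CNTR_INVALID_VL.
 */
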
1467static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1468 int vl, int mode, u64 data)
1469{
1470 struct hfi1_devdata *dd = context;
1471 u32 csr = entry->csr;
1472 int ret = 0;
1473
1474 if (vl != CNTR_INVALID_VL)
1475 return 0;
1476 if (mode == CNTR_MODE_R)
1477 ret = read_lcb_csr(dd, csr, &data);
1478 else if (mode == CNTR_MODE_W)
1479 ret = write_lcb_csr(dd, csr, data);
1480
1481 if (ret) {
1482 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1483 return 0;
1484 }
1485
1486 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1487 return data;
1488}
1489
1490/* Port Access */
1491static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1492 int vl, int mode, u64 data)
1493{
1494 struct hfi1_pportdata *ppd = context;
1495
1496 if (vl != CNTR_INVALID_VL)
1497 return 0;
1498 return read_write_csr(ppd->dd, entry->csr, mode, data);
1499}
1500
1501static u64 port_access_u64_csr(const struct cntr_entry *entry,
1502 void *context, int vl, int mode, u64 data)
1503{
1504 struct hfi1_pportdata *ppd = context;
1505 u64 val;
1506 u64 csr = entry->csr;
1507
1508 if (entry->flags & CNTR_VL) {
1509 if (vl == CNTR_INVALID_VL)
1510 return 0;
1511 csr += 8 * vl;
1512 } else {
1513 if (vl != CNTR_INVALID_VL)
1514 return 0;
1515 }
1516 val = read_write_csr(ppd->dd, csr, mode, data);
1517 return val;
1518}
1519
1520/* Software defined */
1521static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1522 u64 data)
1523{
1524 u64 ret;
1525
1526 if (mode == CNTR_MODE_R) {
1527 ret = *cntr;
1528 } else if (mode == CNTR_MODE_W) {
1529 *cntr = data;
1530 ret = data;
1531 } else {
1532 dd_dev_err(dd, "Invalid cntr sw access mode");
1533 return 0;
1534 }
1535
1536 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1537
1538 return ret;
1539}
1540
1541static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1542 int vl, int mode, u64 data)
1543{
1544 struct hfi1_pportdata *ppd = context;
1545
1546 if (vl != CNTR_INVALID_VL)
1547 return 0;
1548 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1549}
1550
1551static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1552 int vl, int mode, u64 data)
1553{
1554 struct hfi1_pportdata *ppd = context;
1555
1556 if (vl != CNTR_INVALID_VL)
1557 return 0;
1558 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1559}
1560
1561static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1562 void *context, int vl, int mode,
1563 u64 data)
1564{
1565 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1566
1567 if (vl != CNTR_INVALID_VL)
1568 return 0;
1569 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1570}
1571
1572static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1573 void *context, int vl, int mode, u64 data)
1574{
1575 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1576 u64 zero = 0;
1577 u64 *counter;
1578
1579 if (vl == CNTR_INVALID_VL)
1580 counter = &ppd->port_xmit_discards;
1581 else if (vl >= 0 && vl < C_VL_COUNT)
1582 counter = &ppd->port_xmit_discards_vl[vl];
1583 else
1584 counter = &zero;
1585
1586 return read_write_sw(ppd->dd, counter, mode, data);
1587}
1588
1589static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1590 void *context, int vl, int mode,
1591 u64 data)
1592{
1593 struct hfi1_pportdata *ppd = context;
1594
1595 if (vl != CNTR_INVALID_VL)
1596 return 0;
1597
1598 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1599 mode, data);
1600}
1601
1602static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1603 void *context, int vl, int mode, u64 data)
1604{
1605 struct hfi1_pportdata *ppd = context;
1606
1607 if (vl != CNTR_INVALID_VL)
1608 return 0;
1609
1610 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1611 mode, data);
1612}
1613
1614u64 get_all_cpu_total(u64 __percpu *cntr)
1615{
1616 int cpu;
1617 u64 counter = 0;
1618
1619 for_each_possible_cpu(cpu)
1620 counter += *per_cpu_ptr(cntr, cpu);
1621 return counter;
1622}
1623
1624static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1625 u64 __percpu *cntr,
1626 int vl, int mode, u64 data)
1627{
1628 u64 ret = 0;
1629
1630 if (vl != CNTR_INVALID_VL)
1631 return 0;
1632
1633 if (mode == CNTR_MODE_R) {
1634 ret = get_all_cpu_total(cntr) - *z_val;
1635 } else if (mode == CNTR_MODE_W) {
1636 /* A write can only zero the counter */
1637 if (data == 0)
1638 *z_val = get_all_cpu_total(cntr);
1639 else
1640 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1641 } else {
1642 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1643 return 0;
1644 }
1645
1646 return ret;
1647}
1648
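/*
 * Illustration only: the per-CPU counters themselves are never zeroed.
 * A read returns the sum over all possible CPUs minus the saved
 * zero-baseline *z_val, and "clearing" is done by writing 0, which just
 * records a new baseline, e.g. for the interrupt counter:
 *
 *	total = read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *			       vl, CNTR_MODE_R, 0);
 *	(void)read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *			     vl, CNTR_MODE_W, 0);
 */
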
1649static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1650 void *context, int vl, int mode, u64 data)
1651{
1652 struct hfi1_devdata *dd = context;
1653
1654 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1655 mode, data);
1656}
1657
1658static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1659 void *context, int vl, int mode, u64 data)
1660{
1661 struct hfi1_devdata *dd = context;
1662
1663 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1664 mode, data);
1665}
1666
1667static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1668 void *context, int vl, int mode, u64 data)
1669{
1670 struct hfi1_devdata *dd = context;
1671
1672 return dd->verbs_dev.n_piowait;
1673}
1674
1675static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1676 void *context, int vl, int mode, u64 data)
1677{
1678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1679
1680 return dd->verbs_dev.n_piodrain;
1681}
1682
1683static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1684 void *context, int vl, int mode, u64 data)
1685{
1686 struct hfi1_devdata *dd = context;
1687
1688 return dd->verbs_dev.n_txwait;
1689}
1690
1691static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1692 void *context, int vl, int mode, u64 data)
1693{
1694 struct hfi1_devdata *dd = context;
1695
1696 return dd->verbs_dev.n_kmem_wait;
1697}
1698
1699static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1700 void *context, int vl, int mode, u64 data)
1701{
1702 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1703
1704 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1705 mode, data);
1706}
1707
1708/* Software counters for the error status bits within MISC_ERR_STATUS */
1709static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1710 void *context, int vl, int mode,
1711 u64 data)
1712{
1713 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1714
1715 return dd->misc_err_status_cnt[12];
1716}
1717
1718static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1719 void *context, int vl, int mode,
1720 u64 data)
1721{
1722 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1723
1724 return dd->misc_err_status_cnt[11];
1725}
1726
1727static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1728 void *context, int vl, int mode,
1729 u64 data)
1730{
1731 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1732
1733 return dd->misc_err_status_cnt[10];
1734}
1735
1736static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1737 void *context, int vl,
1738 int mode, u64 data)
1739{
1740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1741
1742 return dd->misc_err_status_cnt[9];
1743}
1744
1745static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1746 void *context, int vl, int mode,
1747 u64 data)
1748{
1749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1750
1751 return dd->misc_err_status_cnt[8];
1752}
1753
1754static u64 access_misc_efuse_read_bad_addr_err_cnt(
1755 const struct cntr_entry *entry,
1756 void *context, int vl, int mode, u64 data)
1757{
1758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1759
1760 return dd->misc_err_status_cnt[7];
1761}
1762
1763static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1764 void *context, int vl,
1765 int mode, u64 data)
1766{
1767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1768
1769 return dd->misc_err_status_cnt[6];
1770}
1771
1772static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1773 void *context, int vl, int mode,
1774 u64 data)
1775{
1776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1777
1778 return dd->misc_err_status_cnt[5];
1779}
1780
1781static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1782 void *context, int vl, int mode,
1783 u64 data)
1784{
1785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1786
1787 return dd->misc_err_status_cnt[4];
1788}
1789
1790static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1791 void *context, int vl,
1792 int mode, u64 data)
1793{
1794 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1795
1796 return dd->misc_err_status_cnt[3];
1797}
1798
1799static u64 access_misc_csr_write_bad_addr_err_cnt(
1800 const struct cntr_entry *entry,
1801 void *context, int vl, int mode, u64 data)
1802{
1803 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1804
1805 return dd->misc_err_status_cnt[2];
1806}
1807
1808static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1809 void *context, int vl,
1810 int mode, u64 data)
1811{
1812 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1813
1814 return dd->misc_err_status_cnt[1];
1815}
1816
1817static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1818 void *context, int vl, int mode,
1819 u64 data)
1820{
1821 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1822
1823 return dd->misc_err_status_cnt[0];
1824}
1825
1826/*
1827 * Software counter for the aggregate of
1828 * individual CceErrStatus counters
1829 */
1830static u64 access_sw_cce_err_status_aggregated_cnt(
1831 const struct cntr_entry *entry,
1832 void *context, int vl, int mode, u64 data)
1833{
1834 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1835
1836 return dd->sw_cce_err_status_aggregate;
1837}
1838
1839/*
1840 * Software counters corresponding to each of the
1841 * error status bits within CceErrStatus
1842 */
1843static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1844 void *context, int vl, int mode,
1845 u64 data)
1846{
1847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1848
1849 return dd->cce_err_status_cnt[40];
1850}
1851
1852static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1853 void *context, int vl, int mode,
1854 u64 data)
1855{
1856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1857
1858 return dd->cce_err_status_cnt[39];
1859}
1860
1861static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1862 void *context, int vl, int mode,
1863 u64 data)
1864{
1865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1866
1867 return dd->cce_err_status_cnt[38];
1868}
1869
1870static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1871 void *context, int vl, int mode,
1872 u64 data)
1873{
1874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1875
1876 return dd->cce_err_status_cnt[37];
1877}
1878
1879static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1880 void *context, int vl, int mode,
1881 u64 data)
1882{
1883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1884
1885 return dd->cce_err_status_cnt[36];
1886}
1887
1888static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1889 const struct cntr_entry *entry,
1890 void *context, int vl, int mode, u64 data)
1891{
1892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1893
1894 return dd->cce_err_status_cnt[35];
1895}
1896
1897static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1898 const struct cntr_entry *entry,
1899 void *context, int vl, int mode, u64 data)
1900{
1901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1902
1903 return dd->cce_err_status_cnt[34];
1904}
1905
1906static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1907 void *context, int vl,
1908 int mode, u64 data)
1909{
1910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1911
1912 return dd->cce_err_status_cnt[33];
1913}
1914
1915static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1916 void *context, int vl, int mode,
1917 u64 data)
1918{
1919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1920
1921 return dd->cce_err_status_cnt[32];
1922}
1923
1924static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1925 void *context, int vl, int mode, u64 data)
1926{
1927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1928
1929 return dd->cce_err_status_cnt[31];
1930}
1931
1932static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1933 void *context, int vl, int mode,
1934 u64 data)
1935{
1936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1937
1938 return dd->cce_err_status_cnt[30];
1939}
1940
1941static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1942 void *context, int vl, int mode,
1943 u64 data)
1944{
1945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1946
1947 return dd->cce_err_status_cnt[29];
1948}
1949
1950static u64 access_pcic_transmit_back_parity_err_cnt(
1951 const struct cntr_entry *entry,
1952 void *context, int vl, int mode, u64 data)
1953{
1954 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1955
1956 return dd->cce_err_status_cnt[28];
1957}
1958
1959static u64 access_pcic_transmit_front_parity_err_cnt(
1960 const struct cntr_entry *entry,
1961 void *context, int vl, int mode, u64 data)
1962{
1963 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1964
1965 return dd->cce_err_status_cnt[27];
1966}
1967
1968static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1969 void *context, int vl, int mode,
1970 u64 data)
1971{
1972 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1973
1974 return dd->cce_err_status_cnt[26];
1975}
1976
1977static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1978 void *context, int vl, int mode,
1979 u64 data)
1980{
1981 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1982
1983 return dd->cce_err_status_cnt[25];
1984}
1985
1986static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1987 void *context, int vl, int mode,
1988 u64 data)
1989{
1990 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1991
1992 return dd->cce_err_status_cnt[24];
1993}
1994
1995static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1996 void *context, int vl, int mode,
1997 u64 data)
1998{
1999 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2000
2001 return dd->cce_err_status_cnt[23];
2002}
2003
2004static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2005 void *context, int vl,
2006 int mode, u64 data)
2007{
2008 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2009
2010 return dd->cce_err_status_cnt[22];
2011}
2012
2013static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2014 void *context, int vl, int mode,
2015 u64 data)
2016{
2017 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2018
2019 return dd->cce_err_status_cnt[21];
2020}
2021
2022static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2023 const struct cntr_entry *entry,
2024 void *context, int vl, int mode, u64 data)
2025{
2026 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2027
2028 return dd->cce_err_status_cnt[20];
2029}
2030
2031static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2032 void *context, int vl,
2033 int mode, u64 data)
2034{
2035 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2036
2037 return dd->cce_err_status_cnt[19];
2038}
2039
2040static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2041 void *context, int vl, int mode,
2042 u64 data)
2043{
2044 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2045
2046 return dd->cce_err_status_cnt[18];
2047}
2048
2049static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2050 void *context, int vl, int mode,
2051 u64 data)
2052{
2053 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2054
2055 return dd->cce_err_status_cnt[17];
2056}
2057
2058static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2059 void *context, int vl, int mode,
2060 u64 data)
2061{
2062 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2063
2064 return dd->cce_err_status_cnt[16];
2065}
2066
2067static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2068 void *context, int vl, int mode,
2069 u64 data)
2070{
2071 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2072
2073 return dd->cce_err_status_cnt[15];
2074}
2075
2076static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2077 void *context, int vl,
2078 int mode, u64 data)
2079{
2080 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2081
2082 return dd->cce_err_status_cnt[14];
2083}
2084
2085static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2086 void *context, int vl, int mode,
2087 u64 data)
2088{
2089 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2090
2091 return dd->cce_err_status_cnt[13];
2092}
2093
2094static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2095 const struct cntr_entry *entry,
2096 void *context, int vl, int mode, u64 data)
2097{
2098 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2099
2100 return dd->cce_err_status_cnt[12];
2101}
2102
2103static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2104 const struct cntr_entry *entry,
2105 void *context, int vl, int mode, u64 data)
2106{
2107 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2108
2109 return dd->cce_err_status_cnt[11];
2110}
2111
2112static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2113 const struct cntr_entry *entry,
2114 void *context, int vl, int mode, u64 data)
2115{
2116 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2117
2118 return dd->cce_err_status_cnt[10];
2119}
2120
2121static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2122 const struct cntr_entry *entry,
2123 void *context, int vl, int mode, u64 data)
2124{
2125 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2126
2127 return dd->cce_err_status_cnt[9];
2128}
2129
2130static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2131 const struct cntr_entry *entry,
2132 void *context, int vl, int mode, u64 data)
2133{
2134 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2135
2136 return dd->cce_err_status_cnt[8];
2137}
2138
2139static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2140 void *context, int vl,
2141 int mode, u64 data)
2142{
2143 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2144
2145 return dd->cce_err_status_cnt[7];
2146}
2147
2148static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2149 const struct cntr_entry *entry,
2150 void *context, int vl, int mode, u64 data)
2151{
2152 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2153
2154 return dd->cce_err_status_cnt[6];
2155}
2156
2157static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2158 void *context, int vl, int mode,
2159 u64 data)
2160{
2161 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2162
2163 return dd->cce_err_status_cnt[5];
2164}
2165
2166static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2167 void *context, int vl, int mode,
2168 u64 data)
2169{
2170 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2171
2172 return dd->cce_err_status_cnt[4];
2173}
2174
2175static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2176 const struct cntr_entry *entry,
2177 void *context, int vl, int mode, u64 data)
2178{
2179 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2180
2181 return dd->cce_err_status_cnt[3];
2182}
2183
2184static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2185 void *context, int vl,
2186 int mode, u64 data)
2187{
2188 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2189
2190 return dd->cce_err_status_cnt[2];
2191}
2192
2193static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2194 void *context, int vl,
2195 int mode, u64 data)
2196{
2197 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2198
2199 return dd->cce_err_status_cnt[1];
2200}
2201
2202static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2203 void *context, int vl, int mode,
2204 u64 data)
2205{
2206 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2207
2208 return dd->cce_err_status_cnt[0];
2209}
2210
2211/*
2212 * Software counters corresponding to each of the
2213 * error status bits within RcvErrStatus
2214 */
2215static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2216 void *context, int vl, int mode,
2217 u64 data)
2218{
2219 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2220
2221 return dd->rcv_err_status_cnt[63];
2222}
2223
2224static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2225 void *context, int vl,
2226 int mode, u64 data)
2227{
2228 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2229
2230 return dd->rcv_err_status_cnt[62];
2231}
2232
2233static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2234 void *context, int vl, int mode,
2235 u64 data)
2236{
2237 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2238
2239 return dd->rcv_err_status_cnt[61];
2240}
2241
2242static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2243 void *context, int vl, int mode,
2244 u64 data)
2245{
2246 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2247
2248 return dd->rcv_err_status_cnt[60];
2249}
2250
2251static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2252 void *context, int vl,
2253 int mode, u64 data)
2254{
2255 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2256
2257 return dd->rcv_err_status_cnt[59];
2258}
2259
2260static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2261 void *context, int vl,
2262 int mode, u64 data)
2263{
2264 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2265
2266 return dd->rcv_err_status_cnt[58];
2267}
2268
2269static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2270 void *context, int vl, int mode,
2271 u64 data)
2272{
2273 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2274
2275 return dd->rcv_err_status_cnt[57];
2276}
2277
2278static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2279 void *context, int vl, int mode,
2280 u64 data)
2281{
2282 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2283
2284 return dd->rcv_err_status_cnt[56];
2285}
2286
2287static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2288 void *context, int vl, int mode,
2289 u64 data)
2290{
2291 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2292
2293 return dd->rcv_err_status_cnt[55];
2294}
2295
2296static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2297 const struct cntr_entry *entry,
2298 void *context, int vl, int mode, u64 data)
2299{
2300 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2301
2302 return dd->rcv_err_status_cnt[54];
2303}
2304
2305static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2306 const struct cntr_entry *entry,
2307 void *context, int vl, int mode, u64 data)
2308{
2309 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2310
2311 return dd->rcv_err_status_cnt[53];
2312}
2313
2314static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2315 void *context, int vl,
2316 int mode, u64 data)
2317{
2318 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2319
2320 return dd->rcv_err_status_cnt[52];
2321}
2322
2323static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2324 void *context, int vl,
2325 int mode, u64 data)
2326{
2327 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2328
2329 return dd->rcv_err_status_cnt[51];
2330}
2331
2332static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2333 void *context, int vl,
2334 int mode, u64 data)
2335{
2336 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2337
2338 return dd->rcv_err_status_cnt[50];
2339}
2340
2341static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2342 void *context, int vl,
2343 int mode, u64 data)
2344{
2345 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2346
2347 return dd->rcv_err_status_cnt[49];
2348}
2349
2350static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2351 void *context, int vl,
2352 int mode, u64 data)
2353{
2354 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2355
2356 return dd->rcv_err_status_cnt[48];
2357}
2358
2359static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2360 void *context, int vl,
2361 int mode, u64 data)
2362{
2363 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2364
2365 return dd->rcv_err_status_cnt[47];
2366}
2367
2368static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2369 void *context, int vl, int mode,
2370 u64 data)
2371{
2372 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2373
2374 return dd->rcv_err_status_cnt[46];
2375}
2376
2377static u64 access_rx_hq_intr_csr_parity_err_cnt(
2378 const struct cntr_entry *entry,
2379 void *context, int vl, int mode, u64 data)
2380{
2381 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2382
2383 return dd->rcv_err_status_cnt[45];
2384}
2385
2386static u64 access_rx_lookup_csr_parity_err_cnt(
2387 const struct cntr_entry *entry,
2388 void *context, int vl, int mode, u64 data)
2389{
2390 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2391
2392 return dd->rcv_err_status_cnt[44];
2393}
2394
2395static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2396 const struct cntr_entry *entry,
2397 void *context, int vl, int mode, u64 data)
2398{
2399 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2400
2401 return dd->rcv_err_status_cnt[43];
2402}
2403
2404static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2405 const struct cntr_entry *entry,
2406 void *context, int vl, int mode, u64 data)
2407{
2408 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2409
2410 return dd->rcv_err_status_cnt[42];
2411}
2412
2413static u64 access_rx_lookup_des_part2_parity_err_cnt(
2414 const struct cntr_entry *entry,
2415 void *context, int vl, int mode, u64 data)
2416{
2417 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2418
2419 return dd->rcv_err_status_cnt[41];
2420}
2421
2422static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2423 const struct cntr_entry *entry,
2424 void *context, int vl, int mode, u64 data)
2425{
2426 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2427
2428 return dd->rcv_err_status_cnt[40];
2429}
2430
2431static u64 access_rx_lookup_des_part1_unc_err_cnt(
2432 const struct cntr_entry *entry,
2433 void *context, int vl, int mode, u64 data)
2434{
2435 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2436
2437 return dd->rcv_err_status_cnt[39];
2438}
2439
2440static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2441 const struct cntr_entry *entry,
2442 void *context, int vl, int mode, u64 data)
2443{
2444 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2445
2446 return dd->rcv_err_status_cnt[38];
2447}
2448
2449static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2450 const struct cntr_entry *entry,
2451 void *context, int vl, int mode, u64 data)
2452{
2453 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2454
2455 return dd->rcv_err_status_cnt[37];
2456}
2457
2458static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2459 const struct cntr_entry *entry,
2460 void *context, int vl, int mode, u64 data)
2461{
2462 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2463
2464 return dd->rcv_err_status_cnt[36];
2465}
2466
2467static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2468 const struct cntr_entry *entry,
2469 void *context, int vl, int mode, u64 data)
2470{
2471 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2472
2473 return dd->rcv_err_status_cnt[35];
2474}
2475
2476static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2477 const struct cntr_entry *entry,
2478 void *context, int vl, int mode, u64 data)
2479{
2480 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2481
2482 return dd->rcv_err_status_cnt[34];
2483}
2484
2485static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2486 const struct cntr_entry *entry,
2487 void *context, int vl, int mode, u64 data)
2488{
2489 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2490
2491 return dd->rcv_err_status_cnt[33];
2492}
2493
2494static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2495 void *context, int vl, int mode,
2496 u64 data)
2497{
2498 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2499
2500 return dd->rcv_err_status_cnt[32];
2501}
2502
2503static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2504 void *context, int vl, int mode,
2505 u64 data)
2506{
2507 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2508
2509 return dd->rcv_err_status_cnt[31];
2510}
2511
2512static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2513 void *context, int vl, int mode,
2514 u64 data)
2515{
2516 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2517
2518 return dd->rcv_err_status_cnt[30];
2519}
2520
2521static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2522 void *context, int vl, int mode,
2523 u64 data)
2524{
2525 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2526
2527 return dd->rcv_err_status_cnt[29];
2528}
2529
2530static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2531 void *context, int vl,
2532 int mode, u64 data)
2533{
2534 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2535
2536 return dd->rcv_err_status_cnt[28];
2537}
2538
2539static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2540 const struct cntr_entry *entry,
2541 void *context, int vl, int mode, u64 data)
2542{
2543 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2544
2545 return dd->rcv_err_status_cnt[27];
2546}
2547
2548static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2549 const struct cntr_entry *entry,
2550 void *context, int vl, int mode, u64 data)
2551{
2552 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2553
2554 return dd->rcv_err_status_cnt[26];
2555}
2556
2557static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2558 const struct cntr_entry *entry,
2559 void *context, int vl, int mode, u64 data)
2560{
2561 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2562
2563 return dd->rcv_err_status_cnt[25];
2564}
2565
2566static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2567 const struct cntr_entry *entry,
2568 void *context, int vl, int mode, u64 data)
2569{
2570 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2571
2572 return dd->rcv_err_status_cnt[24];
2573}
2574
2575static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2576 const struct cntr_entry *entry,
2577 void *context, int vl, int mode, u64 data)
2578{
2579 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2580
2581 return dd->rcv_err_status_cnt[23];
2582}
2583
2584static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2585 const struct cntr_entry *entry,
2586 void *context, int vl, int mode, u64 data)
2587{
2588 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2589
2590 return dd->rcv_err_status_cnt[22];
2591}
2592
2593static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2594 const struct cntr_entry *entry,
2595 void *context, int vl, int mode, u64 data)
2596{
2597 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2598
2599 return dd->rcv_err_status_cnt[21];
2600}
2601
2602static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2603 const struct cntr_entry *entry,
2604 void *context, int vl, int mode, u64 data)
2605{
2606 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2607
2608 return dd->rcv_err_status_cnt[20];
2609}
2610
2611static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2612 const struct cntr_entry *entry,
2613 void *context, int vl, int mode, u64 data)
2614{
2615 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2616
2617 return dd->rcv_err_status_cnt[19];
2618}
2619
2620static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2621 void *context, int vl,
2622 int mode, u64 data)
2623{
2624 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2625
2626 return dd->rcv_err_status_cnt[18];
2627}
2628
2629static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2630 void *context, int vl,
2631 int mode, u64 data)
2632{
2633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2634
2635 return dd->rcv_err_status_cnt[17];
2636}
2637
2638static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2639 const struct cntr_entry *entry,
2640 void *context, int vl, int mode, u64 data)
2641{
2642 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2643
2644 return dd->rcv_err_status_cnt[16];
2645}
2646
2647static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2648 const struct cntr_entry *entry,
2649 void *context, int vl, int mode, u64 data)
2650{
2651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2652
2653 return dd->rcv_err_status_cnt[15];
2654}
2655
2656static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2657 void *context, int vl,
2658 int mode, u64 data)
2659{
2660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2661
2662 return dd->rcv_err_status_cnt[14];
2663}
2664
2665static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2666 void *context, int vl,
2667 int mode, u64 data)
2668{
2669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2670
2671 return dd->rcv_err_status_cnt[13];
2672}
2673
2674static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2675 void *context, int vl, int mode,
2676 u64 data)
2677{
2678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2679
2680 return dd->rcv_err_status_cnt[12];
2681}
2682
2683static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2684 void *context, int vl, int mode,
2685 u64 data)
2686{
2687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2688
2689 return dd->rcv_err_status_cnt[11];
2690}
2691
2692static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2693 void *context, int vl, int mode,
2694 u64 data)
2695{
2696 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2697
2698 return dd->rcv_err_status_cnt[10];
2699}
2700
2701static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2702 void *context, int vl, int mode,
2703 u64 data)
2704{
2705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2706
2707 return dd->rcv_err_status_cnt[9];
2708}
2709
2710static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2711 void *context, int vl, int mode,
2712 u64 data)
2713{
2714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2715
2716 return dd->rcv_err_status_cnt[8];
2717}
2718
2719static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2720 const struct cntr_entry *entry,
2721 void *context, int vl, int mode, u64 data)
2722{
2723 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2724
2725 return dd->rcv_err_status_cnt[7];
2726}
2727
2728static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2729 const struct cntr_entry *entry,
2730 void *context, int vl, int mode, u64 data)
2731{
2732 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2733
2734 return dd->rcv_err_status_cnt[6];
2735}
2736
2737static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2738 void *context, int vl, int mode,
2739 u64 data)
2740{
2741 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2742
2743 return dd->rcv_err_status_cnt[5];
2744}
2745
2746static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2747 void *context, int vl, int mode,
2748 u64 data)
2749{
2750 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2751
2752 return dd->rcv_err_status_cnt[4];
2753}
2754
2755static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2756 void *context, int vl, int mode,
2757 u64 data)
2758{
2759 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2760
2761 return dd->rcv_err_status_cnt[3];
2762}
2763
2764static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2765 void *context, int vl, int mode,
2766 u64 data)
2767{
2768 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2769
2770 return dd->rcv_err_status_cnt[2];
2771}
2772
2773static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2774 void *context, int vl, int mode,
2775 u64 data)
2776{
2777 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2778
2779 return dd->rcv_err_status_cnt[1];
2780}
2781
2782static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2783 void *context, int vl, int mode,
2784 u64 data)
2785{
2786 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2787
2788 return dd->rcv_err_status_cnt[0];
2789}
2790
2791/*
2792 * Software counters corresponding to each of the
2793 * error status bits within SendPioErrStatus
2794 */
2795static u64 access_pio_pec_sop_head_parity_err_cnt(
2796 const struct cntr_entry *entry,
2797 void *context, int vl, int mode, u64 data)
2798{
2799 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2800
2801 return dd->send_pio_err_status_cnt[35];
2802}
2803
2804static u64 access_pio_pcc_sop_head_parity_err_cnt(
2805 const struct cntr_entry *entry,
2806 void *context, int vl, int mode, u64 data)
2807{
2808 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2809
2810 return dd->send_pio_err_status_cnt[34];
2811}
2812
2813static u64 access_pio_last_returned_cnt_parity_err_cnt(
2814 const struct cntr_entry *entry,
2815 void *context, int vl, int mode, u64 data)
2816{
2817 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2818
2819 return dd->send_pio_err_status_cnt[33];
2820}
2821
2822static u64 access_pio_current_free_cnt_parity_err_cnt(
2823 const struct cntr_entry *entry,
2824 void *context, int vl, int mode, u64 data)
2825{
2826 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2827
2828 return dd->send_pio_err_status_cnt[32];
2829}
2830
2831static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2832 void *context, int vl, int mode,
2833 u64 data)
2834{
2835 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2836
2837 return dd->send_pio_err_status_cnt[31];
2838}
2839
2840static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2841 void *context, int vl, int mode,
2842 u64 data)
2843{
2844 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2845
2846 return dd->send_pio_err_status_cnt[30];
2847}
2848
2849static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2850 void *context, int vl, int mode,
2851 u64 data)
2852{
2853 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2854
2855 return dd->send_pio_err_status_cnt[29];
2856}
2857
2858static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2859 const struct cntr_entry *entry,
2860 void *context, int vl, int mode, u64 data)
2861{
2862 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2863
2864 return dd->send_pio_err_status_cnt[28];
2865}
2866
2867static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2868 void *context, int vl, int mode,
2869 u64 data)
2870{
2871 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2872
2873 return dd->send_pio_err_status_cnt[27];
2874}
2875
2876static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2877 void *context, int vl, int mode,
2878 u64 data)
2879{
2880 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2881
2882 return dd->send_pio_err_status_cnt[26];
2883}
2884
2885static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2886 void *context, int vl,
2887 int mode, u64 data)
2888{
2889 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2890
2891 return dd->send_pio_err_status_cnt[25];
2892}
2893
2894static u64 access_pio_block_qw_count_parity_err_cnt(
2895 const struct cntr_entry *entry,
2896 void *context, int vl, int mode, u64 data)
2897{
2898 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2899
2900 return dd->send_pio_err_status_cnt[24];
2901}
2902
2903static u64 access_pio_write_qw_valid_parity_err_cnt(
2904 const struct cntr_entry *entry,
2905 void *context, int vl, int mode, u64 data)
2906{
2907 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2908
2909 return dd->send_pio_err_status_cnt[23];
2910}
2911
2912static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2913 void *context, int vl, int mode,
2914 u64 data)
2915{
2916 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2917
2918 return dd->send_pio_err_status_cnt[22];
2919}
2920
2921static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2922 void *context, int vl,
2923 int mode, u64 data)
2924{
2925 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2926
2927 return dd->send_pio_err_status_cnt[21];
2928}
2929
2930static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2931 void *context, int vl,
2932 int mode, u64 data)
2933{
2934 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2935
2936 return dd->send_pio_err_status_cnt[20];
2937}
2938
2939static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2940 void *context, int vl,
2941 int mode, u64 data)
2942{
2943 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2944
2945 return dd->send_pio_err_status_cnt[19];
2946}
2947
2948static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2949 const struct cntr_entry *entry,
2950 void *context, int vl, int mode, u64 data)
2951{
2952 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2953
2954 return dd->send_pio_err_status_cnt[18];
2955}
2956
2957static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2958 void *context, int vl, int mode,
2959 u64 data)
2960{
2961 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2962
2963 return dd->send_pio_err_status_cnt[17];
2964}
2965
2966static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2967 void *context, int vl, int mode,
2968 u64 data)
2969{
2970 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2971
2972 return dd->send_pio_err_status_cnt[16];
2973}
2974
2975static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2976 const struct cntr_entry *entry,
2977 void *context, int vl, int mode, u64 data)
2978{
2979 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2980
2981 return dd->send_pio_err_status_cnt[15];
2982}
2983
2984static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2985 const struct cntr_entry *entry,
2986 void *context, int vl, int mode, u64 data)
2987{
2988 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2989
2990 return dd->send_pio_err_status_cnt[14];
2991}
2992
2993static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2994 const struct cntr_entry *entry,
2995 void *context, int vl, int mode, u64 data)
2996{
2997 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2998
2999 return dd->send_pio_err_status_cnt[13];
3000}
3001
3002static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3003 const struct cntr_entry *entry,
3004 void *context, int vl, int mode, u64 data)
3005{
3006 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3007
3008 return dd->send_pio_err_status_cnt[12];
3009}
3010
3011static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3012 const struct cntr_entry *entry,
3013 void *context, int vl, int mode, u64 data)
3014{
3015 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3016
3017 return dd->send_pio_err_status_cnt[11];
3018}
3019
3020static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3021 const struct cntr_entry *entry,
3022 void *context, int vl, int mode, u64 data)
3023{
3024 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3025
3026 return dd->send_pio_err_status_cnt[10];
3027}
3028
3029static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3030 const struct cntr_entry *entry,
3031 void *context, int vl, int mode, u64 data)
3032{
3033 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3034
3035 return dd->send_pio_err_status_cnt[9];
3036}
3037
3038static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3039 const struct cntr_entry *entry,
3040 void *context, int vl, int mode, u64 data)
3041{
3042 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3043
3044 return dd->send_pio_err_status_cnt[8];
3045}
3046
3047static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3048 const struct cntr_entry *entry,
3049 void *context, int vl, int mode, u64 data)
3050{
3051 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3052
3053 return dd->send_pio_err_status_cnt[7];
3054}
3055
3056static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3057 void *context, int vl, int mode,
3058 u64 data)
3059{
3060 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3061
3062 return dd->send_pio_err_status_cnt[6];
3063}
3064
3065static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3066 void *context, int vl, int mode,
3067 u64 data)
3068{
3069 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3070
3071 return dd->send_pio_err_status_cnt[5];
3072}
3073
3074static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3075 void *context, int vl, int mode,
3076 u64 data)
3077{
3078 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3079
3080 return dd->send_pio_err_status_cnt[4];
3081}
3082
3083static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3084 void *context, int vl, int mode,
3085 u64 data)
3086{
3087 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3088
3089 return dd->send_pio_err_status_cnt[3];
3090}
3091
3092static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3093 void *context, int vl, int mode,
3094 u64 data)
3095{
3096 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3097
3098 return dd->send_pio_err_status_cnt[2];
3099}
3100
3101static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3102 void *context, int vl,
3103 int mode, u64 data)
3104{
3105 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3106
3107 return dd->send_pio_err_status_cnt[1];
3108}
3109
3110static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3111 void *context, int vl, int mode,
3112 u64 data)
3113{
3114 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3115
3116 return dd->send_pio_err_status_cnt[0];
3117}
3118
3119/*
3120 * Software counters corresponding to each of the
3121 * error status bits within SendDmaErrStatus
3122 */
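/*
 * Note: within this group (and the other Send*ErrStatus groups in this
 * file) the accessors are declared in descending bit order, and each one
 * simply returns the matching element of the software counter array
 * (here send_dma_err_status_cnt[]).
 */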
3123static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3124 const struct cntr_entry *entry,
3125 void *context, int vl, int mode, u64 data)
3126{
3127 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3128
3129 return dd->send_dma_err_status_cnt[3];
3130}
3131
3132static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3133 const struct cntr_entry *entry,
3134 void *context, int vl, int mode, u64 data)
3135{
3136 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3137
3138 return dd->send_dma_err_status_cnt[2];
3139}
3140
3141static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3142 void *context, int vl, int mode,
3143 u64 data)
3144{
3145 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3146
3147 return dd->send_dma_err_status_cnt[1];
3148}
3149
3150static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3151 void *context, int vl, int mode,
3152 u64 data)
3153{
3154 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3155
3156 return dd->send_dma_err_status_cnt[0];
3157}
3158
3159/*
3160 * Software counters corresponding to each of the
3161 * error status bits within SendEgressErrStatus
3162 */
3163static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3164 const struct cntr_entry *entry,
3165 void *context, int vl, int mode, u64 data)
3166{
3167 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3168
3169 return dd->send_egress_err_status_cnt[63];
3170}
3171
3172static u64 access_tx_read_sdma_memory_csr_err_cnt(
3173 const struct cntr_entry *entry,
3174 void *context, int vl, int mode, u64 data)
3175{
3176 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3177
3178 return dd->send_egress_err_status_cnt[62];
3179}
3180
3181static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3182 void *context, int vl, int mode,
3183 u64 data)
3184{
3185 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3186
3187 return dd->send_egress_err_status_cnt[61];
3188}
3189
3190static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3191 void *context, int vl,
3192 int mode, u64 data)
3193{
3194 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3195
3196 return dd->send_egress_err_status_cnt[60];
3197}
3198
3199static u64 access_tx_read_sdma_memory_cor_err_cnt(
3200 const struct cntr_entry *entry,
3201 void *context, int vl, int mode, u64 data)
3202{
3203 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3204
3205 return dd->send_egress_err_status_cnt[59];
3206}
3207
3208static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3209 void *context, int vl, int mode,
3210 u64 data)
3211{
3212 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3213
3214 return dd->send_egress_err_status_cnt[58];
3215}
3216
3217static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3218 void *context, int vl, int mode,
3219 u64 data)
3220{
3221 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3222
3223 return dd->send_egress_err_status_cnt[57];
3224}
3225
3226static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3227 void *context, int vl, int mode,
3228 u64 data)
3229{
3230 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3231
3232 return dd->send_egress_err_status_cnt[56];
3233}
3234
3235static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3236 void *context, int vl, int mode,
3237 u64 data)
3238{
3239 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3240
3241 return dd->send_egress_err_status_cnt[55];
3242}
3243
3244static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3245 void *context, int vl, int mode,
3246 u64 data)
3247{
3248 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3249
3250 return dd->send_egress_err_status_cnt[54];
3251}
3252
3253static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3254 void *context, int vl, int mode,
3255 u64 data)
3256{
3257 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3258
3259 return dd->send_egress_err_status_cnt[53];
3260}
3261
3262static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3263 void *context, int vl, int mode,
3264 u64 data)
3265{
3266 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3267
3268 return dd->send_egress_err_status_cnt[52];
3269}
3270
3271static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3272 void *context, int vl, int mode,
3273 u64 data)
3274{
3275 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3276
3277 return dd->send_egress_err_status_cnt[51];
3278}
3279
3280static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3281 void *context, int vl, int mode,
3282 u64 data)
3283{
3284 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3285
3286 return dd->send_egress_err_status_cnt[50];
3287}
3288
3289static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3290 void *context, int vl, int mode,
3291 u64 data)
3292{
3293 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3294
3295 return dd->send_egress_err_status_cnt[49];
3296}
3297
3298static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3299 void *context, int vl, int mode,
3300 u64 data)
3301{
3302 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3303
3304 return dd->send_egress_err_status_cnt[48];
3305}
3306
3307static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3308 void *context, int vl, int mode,
3309 u64 data)
3310{
3311 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3312
3313 return dd->send_egress_err_status_cnt[47];
3314}
3315
3316static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3317 void *context, int vl, int mode,
3318 u64 data)
3319{
3320 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3321
3322 return dd->send_egress_err_status_cnt[46];
3323}
3324
3325static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3326 void *context, int vl, int mode,
3327 u64 data)
3328{
3329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3330
3331 return dd->send_egress_err_status_cnt[45];
3332}
3333
3334static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3335 void *context, int vl,
3336 int mode, u64 data)
3337{
3338 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3339
3340 return dd->send_egress_err_status_cnt[44];
3341}
3342
3343static u64 access_tx_read_sdma_memory_unc_err_cnt(
3344 const struct cntr_entry *entry,
3345 void *context, int vl, int mode, u64 data)
3346{
3347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3348
3349 return dd->send_egress_err_status_cnt[43];
3350}
3351
3352static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3353 void *context, int vl, int mode,
3354 u64 data)
3355{
3356 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3357
3358 return dd->send_egress_err_status_cnt[42];
3359}
3360
3361static u64 access_tx_credit_return_partiy_err_cnt(
3362 const struct cntr_entry *entry,
3363 void *context, int vl, int mode, u64 data)
3364{
3365 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3366
3367 return dd->send_egress_err_status_cnt[41];
3368}
3369
3370static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3371 const struct cntr_entry *entry,
3372 void *context, int vl, int mode, u64 data)
3373{
3374 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3375
3376 return dd->send_egress_err_status_cnt[40];
3377}
3378
3379static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3380 const struct cntr_entry *entry,
3381 void *context, int vl, int mode, u64 data)
3382{
3383 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3384
3385 return dd->send_egress_err_status_cnt[39];
3386}
3387
3388static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3389 const struct cntr_entry *entry,
3390 void *context, int vl, int mode, u64 data)
3391{
3392 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3393
3394 return dd->send_egress_err_status_cnt[38];
3395}
3396
3397static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3398 const struct cntr_entry *entry,
3399 void *context, int vl, int mode, u64 data)
3400{
3401 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3402
3403 return dd->send_egress_err_status_cnt[37];
3404}
3405
3406static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3407 const struct cntr_entry *entry,
3408 void *context, int vl, int mode, u64 data)
3409{
3410 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3411
3412 return dd->send_egress_err_status_cnt[36];
3413}
3414
3415static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3416 const struct cntr_entry *entry,
3417 void *context, int vl, int mode, u64 data)
3418{
3419 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3420
3421 return dd->send_egress_err_status_cnt[35];
3422}
3423
3424static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3425 const struct cntr_entry *entry,
3426 void *context, int vl, int mode, u64 data)
3427{
3428 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3429
3430 return dd->send_egress_err_status_cnt[34];
3431}
3432
3433static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3434 const struct cntr_entry *entry,
3435 void *context, int vl, int mode, u64 data)
3436{
3437 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3438
3439 return dd->send_egress_err_status_cnt[33];
3440}
3441
3442static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3443 const struct cntr_entry *entry,
3444 void *context, int vl, int mode, u64 data)
3445{
3446 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3447
3448 return dd->send_egress_err_status_cnt[32];
3449}
3450
3451static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3452 const struct cntr_entry *entry,
3453 void *context, int vl, int mode, u64 data)
3454{
3455 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3456
3457 return dd->send_egress_err_status_cnt[31];
3458}
3459
3460static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3461 const struct cntr_entry *entry,
3462 void *context, int vl, int mode, u64 data)
3463{
3464 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3465
3466 return dd->send_egress_err_status_cnt[30];
3467}
3468
3469static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3470 const struct cntr_entry *entry,
3471 void *context, int vl, int mode, u64 data)
3472{
3473 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3474
3475 return dd->send_egress_err_status_cnt[29];
3476}
3477
3478static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3479 const struct cntr_entry *entry,
3480 void *context, int vl, int mode, u64 data)
3481{
3482 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3483
3484 return dd->send_egress_err_status_cnt[28];
3485}
3486
3487static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3488 const struct cntr_entry *entry,
3489 void *context, int vl, int mode, u64 data)
3490{
3491 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3492
3493 return dd->send_egress_err_status_cnt[27];
3494}
3495
3496static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3497 const struct cntr_entry *entry,
3498 void *context, int vl, int mode, u64 data)
3499{
3500 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3501
3502 return dd->send_egress_err_status_cnt[26];
3503}
3504
3505static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3506 const struct cntr_entry *entry,
3507 void *context, int vl, int mode, u64 data)
3508{
3509 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3510
3511 return dd->send_egress_err_status_cnt[25];
3512}
3513
3514static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3515 const struct cntr_entry *entry,
3516 void *context, int vl, int mode, u64 data)
3517{
3518 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3519
3520 return dd->send_egress_err_status_cnt[24];
3521}
3522
3523static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3524 const struct cntr_entry *entry,
3525 void *context, int vl, int mode, u64 data)
3526{
3527 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3528
3529 return dd->send_egress_err_status_cnt[23];
3530}
3531
3532static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3533 const struct cntr_entry *entry,
3534 void *context, int vl, int mode, u64 data)
3535{
3536 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3537
3538 return dd->send_egress_err_status_cnt[22];
3539}
3540
3541static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3542 const struct cntr_entry *entry,
3543 void *context, int vl, int mode, u64 data)
3544{
3545 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3546
3547 return dd->send_egress_err_status_cnt[21];
3548}
3549
3550static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3551 const struct cntr_entry *entry,
3552 void *context, int vl, int mode, u64 data)
3553{
3554 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3555
3556 return dd->send_egress_err_status_cnt[20];
3557}
3558
3559static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3560 const struct cntr_entry *entry,
3561 void *context, int vl, int mode, u64 data)
3562{
3563 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3564
3565 return dd->send_egress_err_status_cnt[19];
3566}
3567
3568static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3569 const struct cntr_entry *entry,
3570 void *context, int vl, int mode, u64 data)
3571{
3572 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3573
3574 return dd->send_egress_err_status_cnt[18];
3575}
3576
3577static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3578 const struct cntr_entry *entry,
3579 void *context, int vl, int mode, u64 data)
3580{
3581 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3582
3583 return dd->send_egress_err_status_cnt[17];
3584}
3585
3586static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3587 const struct cntr_entry *entry,
3588 void *context, int vl, int mode, u64 data)
3589{
3590 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3591
3592 return dd->send_egress_err_status_cnt[16];
3593}
3594
3595static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3596 void *context, int vl, int mode,
3597 u64 data)
3598{
3599 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3600
3601 return dd->send_egress_err_status_cnt[15];
3602}
3603
3604static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3605 void *context, int vl,
3606 int mode, u64 data)
3607{
3608 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3609
3610 return dd->send_egress_err_status_cnt[14];
3611}
3612
3613static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3614 void *context, int vl, int mode,
3615 u64 data)
3616{
3617 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3618
3619 return dd->send_egress_err_status_cnt[13];
3620}
3621
3622static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3623 void *context, int vl, int mode,
3624 u64 data)
3625{
3626 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3627
3628 return dd->send_egress_err_status_cnt[12];
3629}
3630
3631static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3632 const struct cntr_entry *entry,
3633 void *context, int vl, int mode, u64 data)
3634{
3635 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3636
3637 return dd->send_egress_err_status_cnt[11];
3638}
3639
3640static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3641 void *context, int vl, int mode,
3642 u64 data)
3643{
3644 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3645
3646 return dd->send_egress_err_status_cnt[10];
3647}
3648
3649static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3650 void *context, int vl, int mode,
3651 u64 data)
3652{
3653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3654
3655 return dd->send_egress_err_status_cnt[9];
3656}
3657
3658static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3659 const struct cntr_entry *entry,
3660 void *context, int vl, int mode, u64 data)
3661{
3662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3663
3664 return dd->send_egress_err_status_cnt[8];
3665}
3666
3667static u64 access_tx_pio_launch_intf_parity_err_cnt(
3668 const struct cntr_entry *entry,
3669 void *context, int vl, int mode, u64 data)
3670{
3671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3672
3673 return dd->send_egress_err_status_cnt[7];
3674}
3675
3676static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3677 void *context, int vl, int mode,
3678 u64 data)
3679{
3680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3681
3682 return dd->send_egress_err_status_cnt[6];
3683}
3684
3685static u64 access_tx_incorrect_link_state_err_cnt(
3686 const struct cntr_entry *entry,
3687 void *context, int vl, int mode, u64 data)
3688{
3689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3690
3691 return dd->send_egress_err_status_cnt[5];
3692}
3693
3694static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3695 void *context, int vl, int mode,
3696 u64 data)
3697{
3698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3699
3700 return dd->send_egress_err_status_cnt[4];
3701}
3702
3703static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3704 const struct cntr_entry *entry,
3705 void *context, int vl, int mode, u64 data)
3706{
3707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3708
3709 return dd->send_egress_err_status_cnt[3];
3710}
3711
3712static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3713 void *context, int vl, int mode,
3714 u64 data)
3715{
3716 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3717
3718 return dd->send_egress_err_status_cnt[2];
3719}
3720
3721static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3722 const struct cntr_entry *entry,
3723 void *context, int vl, int mode, u64 data)
3724{
3725 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3726
3727 return dd->send_egress_err_status_cnt[1];
3728}
3729
3730static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3731 const struct cntr_entry *entry,
3732 void *context, int vl, int mode, u64 data)
3733{
3734 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3735
3736 return dd->send_egress_err_status_cnt[0];
3737}
3738
3739/*
3740 * Software counters corresponding to each of the
3741 * error status bits within SendErrStatus
3742 */
3743static u64 access_send_csr_write_bad_addr_err_cnt(
3744 const struct cntr_entry *entry,
3745 void *context, int vl, int mode, u64 data)
3746{
3747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3748
3749 return dd->send_err_status_cnt[2];
3750}
3751
3752static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3753 void *context, int vl,
3754 int mode, u64 data)
3755{
3756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3757
3758 return dd->send_err_status_cnt[1];
3759}
3760
3761static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3762 void *context, int vl, int mode,
3763 u64 data)
3764{
3765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3766
3767 return dd->send_err_status_cnt[0];
3768}
3769
3770/*
3771 * Software counters corresponding to each of the
3772 * error status bits within SendCtxtErrStatus
3773 */
3774static u64 access_pio_write_out_of_bounds_err_cnt(
3775 const struct cntr_entry *entry,
3776 void *context, int vl, int mode, u64 data)
3777{
3778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3779
3780 return dd->sw_ctxt_err_status_cnt[4];
3781}
3782
3783static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3784 void *context, int vl, int mode,
3785 u64 data)
3786{
3787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3788
3789 return dd->sw_ctxt_err_status_cnt[3];
3790}
3791
3792static u64 access_pio_write_crosses_boundary_err_cnt(
3793 const struct cntr_entry *entry,
3794 void *context, int vl, int mode, u64 data)
3795{
3796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3797
3798 return dd->sw_ctxt_err_status_cnt[2];
3799}
3800
3801static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3802 void *context, int vl,
3803 int mode, u64 data)
3804{
3805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3806
3807 return dd->sw_ctxt_err_status_cnt[1];
3808}
3809
3810static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3811 void *context, int vl, int mode,
3812 u64 data)
3813{
3814 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3815
3816 return dd->sw_ctxt_err_status_cnt[0];
3817}
3818
3819/*
3820 * Software counters corresponding to each of the
3821 * error status bits within SendDmaEngErrStatus
3822 */
3823static u64 access_sdma_header_request_fifo_cor_err_cnt(
3824 const struct cntr_entry *entry,
3825 void *context, int vl, int mode, u64 data)
3826{
3827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3828
3829 return dd->sw_send_dma_eng_err_status_cnt[23];
3830}
3831
3832static u64 access_sdma_header_storage_cor_err_cnt(
3833 const struct cntr_entry *entry,
3834 void *context, int vl, int mode, u64 data)
3835{
3836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3837
3838 return dd->sw_send_dma_eng_err_status_cnt[22];
3839}
3840
3841static u64 access_sdma_packet_tracking_cor_err_cnt(
3842 const struct cntr_entry *entry,
3843 void *context, int vl, int mode, u64 data)
3844{
3845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3846
3847 return dd->sw_send_dma_eng_err_status_cnt[21];
3848}
3849
3850static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3851 void *context, int vl, int mode,
3852 u64 data)
3853{
3854 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3855
3856 return dd->sw_send_dma_eng_err_status_cnt[20];
3857}
3858
3859static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3860 void *context, int vl, int mode,
3861 u64 data)
3862{
3863 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3864
3865 return dd->sw_send_dma_eng_err_status_cnt[19];
3866}
3867
3868static u64 access_sdma_header_request_fifo_unc_err_cnt(
3869 const struct cntr_entry *entry,
3870 void *context, int vl, int mode, u64 data)
3871{
3872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3873
3874 return dd->sw_send_dma_eng_err_status_cnt[18];
3875}
3876
3877static u64 access_sdma_header_storage_unc_err_cnt(
3878 const struct cntr_entry *entry,
3879 void *context, int vl, int mode, u64 data)
3880{
3881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3882
3883 return dd->sw_send_dma_eng_err_status_cnt[17];
3884}
3885
3886static u64 access_sdma_packet_tracking_unc_err_cnt(
3887 const struct cntr_entry *entry,
3888 void *context, int vl, int mode, u64 data)
3889{
3890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3891
3892 return dd->sw_send_dma_eng_err_status_cnt[16];
3893}
3894
3895static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3896 void *context, int vl, int mode,
3897 u64 data)
3898{
3899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3900
3901 return dd->sw_send_dma_eng_err_status_cnt[15];
3902}
3903
3904static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3905 void *context, int vl, int mode,
3906 u64 data)
3907{
3908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3909
3910 return dd->sw_send_dma_eng_err_status_cnt[14];
3911}
3912
3913static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3914 void *context, int vl, int mode,
3915 u64 data)
3916{
3917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3918
3919 return dd->sw_send_dma_eng_err_status_cnt[13];
3920}
3921
3922static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3923 void *context, int vl, int mode,
3924 u64 data)
3925{
3926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3927
3928 return dd->sw_send_dma_eng_err_status_cnt[12];
3929}
3930
3931static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3932 void *context, int vl, int mode,
3933 u64 data)
3934{
3935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3936
3937 return dd->sw_send_dma_eng_err_status_cnt[11];
3938}
3939
3940static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3941 void *context, int vl, int mode,
3942 u64 data)
3943{
3944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3945
3946 return dd->sw_send_dma_eng_err_status_cnt[10];
3947}
3948
3949static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3950 void *context, int vl, int mode,
3951 u64 data)
3952{
3953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3954
3955 return dd->sw_send_dma_eng_err_status_cnt[9];
3956}
3957
3958static u64 access_sdma_packet_desc_overflow_err_cnt(
3959 const struct cntr_entry *entry,
3960 void *context, int vl, int mode, u64 data)
3961{
3962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3963
3964 return dd->sw_send_dma_eng_err_status_cnt[8];
3965}
3966
3967static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3968 void *context, int vl,
3969 int mode, u64 data)
3970{
3971 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3972
3973 return dd->sw_send_dma_eng_err_status_cnt[7];
3974}
3975
3976static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3977 void *context, int vl, int mode, u64 data)
3978{
3979 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3980
3981 return dd->sw_send_dma_eng_err_status_cnt[6];
3982}
3983
3984static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3985 void *context, int vl, int mode,
3986 u64 data)
3987{
3988 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3989
3990 return dd->sw_send_dma_eng_err_status_cnt[5];
3991}
3992
3993static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3994 void *context, int vl, int mode,
3995 u64 data)
3996{
3997 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3998
3999 return dd->sw_send_dma_eng_err_status_cnt[4];
4000}
4001
4002static u64 access_sdma_tail_out_of_bounds_err_cnt(
4003 const struct cntr_entry *entry,
4004 void *context, int vl, int mode, u64 data)
4005{
4006 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4007
4008 return dd->sw_send_dma_eng_err_status_cnt[3];
4009}
4010
4011static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4012 void *context, int vl, int mode,
4013 u64 data)
4014{
4015 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4016
4017 return dd->sw_send_dma_eng_err_status_cnt[2];
4018}
4019
4020static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4021 void *context, int vl, int mode,
4022 u64 data)
4023{
4024 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4025
4026 return dd->sw_send_dma_eng_err_status_cnt[1];
4027}
4028
4029static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4030 void *context, int vl, int mode,
4031 u64 data)
4032{
4033 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4034
4035 return dd->sw_send_dma_eng_err_status_cnt[0];
4036}
4037
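/*
 * DcRecvErr combines the hardware DCC_ERR_PORTRCV_ERR_CNT CSR with the
 * software count of bypass packet errors.  A read returns the sum,
 * saturated at CNTR_MAX; a write clears the software portion as well.
 */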
4038static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4039 void *context, int vl, int mode,
4040 u64 data)
4041{
4042 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4043
4044 u64 val = 0;
4045 u64 csr = entry->csr;
4046
4047 val = read_write_csr(dd, csr, mode, data);
4048 if (mode == CNTR_MODE_R) {
4049 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4050 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4051 } else if (mode == CNTR_MODE_W) {
4052 dd->sw_rcv_bypass_packet_errors = 0;
4053 } else {
4054 dd_dev_err(dd, "Invalid cntr register access mode\n");
4055 return 0;
4056 }
4057 return val;
4058}
4059
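/*
 * def_access_sw_cpu(cntr) generates access_sw_cpu_<cntr>(), an accessor for
 * the per-CPU port counters kept in ppd->ibport_data.rvp: the live value in
 * rvp.<cntr> is read (or reset) against the rvp.z_<cntr> baseline through
 * read_write_cpu().  For example, def_access_sw_cpu(rc_acks) below defines
 * access_sw_cpu_rc_acks().
 */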
4060#define def_access_sw_cpu(cntr) \
4061static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
4062 void *context, int vl, int mode, u64 data) \
4063{ \
4064 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
4065 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
4066 ppd->ibport_data.rvp.cntr, vl, \
4067 mode, data); \
4068}
4069
4070def_access_sw_cpu(rc_acks);
4071def_access_sw_cpu(rc_qacks);
4072def_access_sw_cpu(rc_delayed_comp);
4073
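/*
 * def_access_ibp_counter(cntr) generates access_ibp_<cntr>() for the
 * per-port IB protocol counters in ppd->ibport_data.rvp.n_<cntr>.  These
 * counters are not tracked per VL, so any VL other than CNTR_INVALID_VL
 * reads back 0; otherwise the value is handled by read_write_sw().
 */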
4074#define def_access_ibp_counter(cntr) \
4075static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
4076 void *context, int vl, int mode, u64 data) \
4077{ \
4078 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
4079 \
4080 if (vl != CNTR_INVALID_VL) \
4081 return 0; \
4082 \
4083 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
4084 mode, data); \
4085}
4086
4087def_access_ibp_counter(loop_pkts);
4088def_access_ibp_counter(rc_resends);
4089def_access_ibp_counter(rnr_naks);
4090def_access_ibp_counter(other_naks);
4091def_access_ibp_counter(rc_timeouts);
4092def_access_ibp_counter(pkt_drops);
4093def_access_ibp_counter(dmawait);
4094def_access_ibp_counter(rc_seqnak);
4095def_access_ibp_counter(rc_dupreq);
4096def_access_ibp_counter(rdma_seq);
4097def_access_ibp_counter(unaligned);
4098def_access_ibp_counter(seq_naks);
4099
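/*
 * Device counter table, indexed by the C_* device counter enum.  Each entry
 * names the counter and supplies a hardware CSR, an access callback, or
 * both; the CNTR_* flags (e.g. CNTR_NORMAL, CNTR_SYNTH, CNTR_VL, CNTR_32BIT,
 * CNTR_SDMA) describe how the counter is read and presented.
 */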
4100static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4101[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4102[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4103 CNTR_NORMAL),
4104[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4105 CNTR_NORMAL),
4106[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4107 RCV_TID_FLOW_GEN_MISMATCH_CNT,
4108 CNTR_NORMAL),
4109[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4110 CNTR_NORMAL),
4111[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4112 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4113[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4114 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4115[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4116 CNTR_NORMAL),
4117[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4118 CNTR_NORMAL),
4119[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4120 CNTR_NORMAL),
4121[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4122 CNTR_NORMAL),
4123[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4124 CNTR_NORMAL),
4125[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4126 CNTR_NORMAL),
4127[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4128 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4129[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4130 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4131[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4132 CNTR_SYNTH),
4133[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4134 access_dc_rcv_err_cnt),
4135[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4136 CNTR_SYNTH),
4137[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4138 CNTR_SYNTH),
4139[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4140 CNTR_SYNTH),
4141[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4142 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4143[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4144 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4145 CNTR_SYNTH),
4146[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4147 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4148[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4149 CNTR_SYNTH),
4150[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4151 CNTR_SYNTH),
4152[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4153 CNTR_SYNTH),
4154[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4155 CNTR_SYNTH),
4156[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4157 CNTR_SYNTH),
4158[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4159 CNTR_SYNTH),
4160[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4161 CNTR_SYNTH),
4162[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4163 CNTR_SYNTH | CNTR_VL),
4164[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4165 CNTR_SYNTH | CNTR_VL),
4166[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4167[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4168 CNTR_SYNTH | CNTR_VL),
4169[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4170[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4171 CNTR_SYNTH | CNTR_VL),
4172[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4173 CNTR_SYNTH),
4174[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4175 CNTR_SYNTH | CNTR_VL),
4176[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4177 CNTR_SYNTH),
4178[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4179 CNTR_SYNTH | CNTR_VL),
4180[C_DC_TOTAL_CRC] =
4181 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4182 CNTR_SYNTH),
4183[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4184 CNTR_SYNTH),
4185[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4186 CNTR_SYNTH),
4187[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4188 CNTR_SYNTH),
4189[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4190 CNTR_SYNTH),
4191[C_DC_CRC_MULT_LN] =
4192 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4193 CNTR_SYNTH),
4194[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4195 CNTR_SYNTH),
4196[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4197 CNTR_SYNTH),
4198[C_DC_SEQ_CRC_CNT] =
4199 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4200 CNTR_SYNTH),
4201[C_DC_ESC0_ONLY_CNT] =
4202 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4203 CNTR_SYNTH),
4204[C_DC_ESC0_PLUS1_CNT] =
4205 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4206 CNTR_SYNTH),
4207[C_DC_ESC0_PLUS2_CNT] =
4208 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4209 CNTR_SYNTH),
4210[C_DC_REINIT_FROM_PEER_CNT] =
4211 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4212 CNTR_SYNTH),
4213[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4214 CNTR_SYNTH),
4215[C_DC_MISC_FLG_CNT] =
4216 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4217 CNTR_SYNTH),
4218[C_DC_PRF_GOOD_LTP_CNT] =
4219 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4220[C_DC_PRF_ACCEPTED_LTP_CNT] =
4221 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4222 CNTR_SYNTH),
4223[C_DC_PRF_RX_FLIT_CNT] =
4224 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4225[C_DC_PRF_TX_FLIT_CNT] =
4226 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4227[C_DC_PRF_CLK_CNTR] =
4228 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4229[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4230 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4231[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4232 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4233 CNTR_SYNTH),
4234[C_DC_PG_STS_TX_SBE_CNT] =
4235 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4236[C_DC_PG_STS_TX_MBE_CNT] =
4237 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4238 CNTR_SYNTH),
4239[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4240 access_sw_cpu_intr),
4241[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4242 access_sw_cpu_rcv_limit),
4243[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4244 access_sw_vtx_wait),
4245[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4246 access_sw_pio_wait),
4247[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4248 access_sw_pio_drain),
4249[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4250 access_sw_kmem_wait),
4251[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4252 access_sw_send_schedule),
4253[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4254 SEND_DMA_DESC_FETCHED_CNT, 0,
4255 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4256 dev_access_u32_csr),
4257[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4258 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4259 access_sde_int_cnt),
4260[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4261 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4262 access_sde_err_cnt),
4263[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4264 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4265 access_sde_idle_int_cnt),
4266[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4267 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4268 access_sde_progress_int_cnt),
4269/* MISC_ERR_STATUS */
4270[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4271 CNTR_NORMAL,
4272 access_misc_pll_lock_fail_err_cnt),
4273[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4274 CNTR_NORMAL,
4275 access_misc_mbist_fail_err_cnt),
4276[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4277 CNTR_NORMAL,
4278 access_misc_invalid_eep_cmd_err_cnt),
4279[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4280 CNTR_NORMAL,
4281 access_misc_efuse_done_parity_err_cnt),
4282[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4283 CNTR_NORMAL,
4284 access_misc_efuse_write_err_cnt),
4285[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4286 0, CNTR_NORMAL,
4287 access_misc_efuse_read_bad_addr_err_cnt),
4288[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4289 CNTR_NORMAL,
4290 access_misc_efuse_csr_parity_err_cnt),
4291[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4292 CNTR_NORMAL,
4293 access_misc_fw_auth_failed_err_cnt),
4294[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4295 CNTR_NORMAL,
4296 access_misc_key_mismatch_err_cnt),
4297[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4298 CNTR_NORMAL,
4299 access_misc_sbus_write_failed_err_cnt),
4300[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4301 CNTR_NORMAL,
4302 access_misc_csr_write_bad_addr_err_cnt),
4303[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4304 CNTR_NORMAL,
4305 access_misc_csr_read_bad_addr_err_cnt),
4306[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4307 CNTR_NORMAL,
4308 access_misc_csr_parity_err_cnt),
4309/* CceErrStatus */
4310[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4311 CNTR_NORMAL,
4312 access_sw_cce_err_status_aggregated_cnt),
4313[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4314 CNTR_NORMAL,
4315 access_cce_msix_csr_parity_err_cnt),
4316[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4317 CNTR_NORMAL,
4318 access_cce_int_map_unc_err_cnt),
4319[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4320 CNTR_NORMAL,
4321 access_cce_int_map_cor_err_cnt),
4322[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4323 CNTR_NORMAL,
4324 access_cce_msix_table_unc_err_cnt),
4325[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4326 CNTR_NORMAL,
4327 access_cce_msix_table_cor_err_cnt),
4328[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4329 0, CNTR_NORMAL,
4330 access_cce_rxdma_conv_fifo_parity_err_cnt),
4331[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4332 0, CNTR_NORMAL,
4333 access_cce_rcpl_async_fifo_parity_err_cnt),
4334[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4335 CNTR_NORMAL,
4336 access_cce_seg_write_bad_addr_err_cnt),
4337[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4338 CNTR_NORMAL,
4339 access_cce_seg_read_bad_addr_err_cnt),
4340[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4341 CNTR_NORMAL,
4342 access_la_triggered_cnt),
4343[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4344 CNTR_NORMAL,
4345 access_cce_trgt_cpl_timeout_err_cnt),
4346[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4347 CNTR_NORMAL,
4348 access_pcic_receive_parity_err_cnt),
4349[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4350 CNTR_NORMAL,
4351 access_pcic_transmit_back_parity_err_cnt),
4352[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4353 0, CNTR_NORMAL,
4354 access_pcic_transmit_front_parity_err_cnt),
4355[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4356 CNTR_NORMAL,
4357 access_pcic_cpl_dat_q_unc_err_cnt),
4358[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4359 CNTR_NORMAL,
4360 access_pcic_cpl_hd_q_unc_err_cnt),
4361[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4362 CNTR_NORMAL,
4363 access_pcic_post_dat_q_unc_err_cnt),
4364[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4365 CNTR_NORMAL,
4366 access_pcic_post_hd_q_unc_err_cnt),
4367[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4368 CNTR_NORMAL,
4369 access_pcic_retry_sot_mem_unc_err_cnt),
4370[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4371 CNTR_NORMAL,
4372 access_pcic_retry_mem_unc_err),
4373[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4374 CNTR_NORMAL,
4375 access_pcic_n_post_dat_q_parity_err_cnt),
4376[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4377 CNTR_NORMAL,
4378 access_pcic_n_post_h_q_parity_err_cnt),
4379[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4380 CNTR_NORMAL,
4381 access_pcic_cpl_dat_q_cor_err_cnt),
4382[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4383 CNTR_NORMAL,
4384 access_pcic_cpl_hd_q_cor_err_cnt),
4385[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4386 CNTR_NORMAL,
4387 access_pcic_post_dat_q_cor_err_cnt),
4388[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4389 CNTR_NORMAL,
4390 access_pcic_post_hd_q_cor_err_cnt),
4391[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4392 CNTR_NORMAL,
4393 access_pcic_retry_sot_mem_cor_err_cnt),
4394[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4395 CNTR_NORMAL,
4396 access_pcic_retry_mem_cor_err_cnt),
4397[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4398 "CceCli1AsyncFifoDbgParityError", 0, 0,
4399 CNTR_NORMAL,
4400 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4401[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4402 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4403 CNTR_NORMAL,
4404 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4405 ),
4406[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4407 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4408 CNTR_NORMAL,
4409 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4410[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4411 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4412 CNTR_NORMAL,
4413 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4414[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4415 0, CNTR_NORMAL,
4416 access_cce_cli2_async_fifo_parity_err_cnt),
4417[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4418 CNTR_NORMAL,
4419 access_cce_csr_cfg_bus_parity_err_cnt),
4420[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4421 0, CNTR_NORMAL,
4422 access_cce_cli0_async_fifo_parity_err_cnt),
4423[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4424 CNTR_NORMAL,
4425 access_cce_rspd_data_parity_err_cnt),
4426[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4427 CNTR_NORMAL,
4428 access_cce_trgt_access_err_cnt),
4429[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4430 0, CNTR_NORMAL,
4431 access_cce_trgt_async_fifo_parity_err_cnt),
4432[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4433 CNTR_NORMAL,
4434 access_cce_csr_write_bad_addr_err_cnt),
4435[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4436 CNTR_NORMAL,
4437 access_cce_csr_read_bad_addr_err_cnt),
4438[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4439 CNTR_NORMAL,
4440 access_ccs_csr_parity_err_cnt),
4441
4442/* RcvErrStatus */
4443[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4444 CNTR_NORMAL,
4445 access_rx_csr_parity_err_cnt),
4446[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4447 CNTR_NORMAL,
4448 access_rx_csr_write_bad_addr_err_cnt),
4449[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4450 CNTR_NORMAL,
4451 access_rx_csr_read_bad_addr_err_cnt),
4452[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4453 CNTR_NORMAL,
4454 access_rx_dma_csr_unc_err_cnt),
4455[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4456 CNTR_NORMAL,
4457 access_rx_dma_dq_fsm_encoding_err_cnt),
4458[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4459 CNTR_NORMAL,
4460 access_rx_dma_eq_fsm_encoding_err_cnt),
4461[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4462 CNTR_NORMAL,
4463 access_rx_dma_csr_parity_err_cnt),
4464[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4465 CNTR_NORMAL,
4466 access_rx_rbuf_data_cor_err_cnt),
4467[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4468 CNTR_NORMAL,
4469 access_rx_rbuf_data_unc_err_cnt),
4470[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4471 CNTR_NORMAL,
4472 access_rx_dma_data_fifo_rd_cor_err_cnt),
4473[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4474 CNTR_NORMAL,
4475 access_rx_dma_data_fifo_rd_unc_err_cnt),
4476[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4477 CNTR_NORMAL,
4478 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4479[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4480 CNTR_NORMAL,
4481 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4482[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4483 CNTR_NORMAL,
4484 access_rx_rbuf_desc_part2_cor_err_cnt),
4485[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4486 CNTR_NORMAL,
4487 access_rx_rbuf_desc_part2_unc_err_cnt),
4488[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4489 CNTR_NORMAL,
4490 access_rx_rbuf_desc_part1_cor_err_cnt),
4491[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4492 CNTR_NORMAL,
4493 access_rx_rbuf_desc_part1_unc_err_cnt),
4494[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4495 CNTR_NORMAL,
4496 access_rx_hq_intr_fsm_err_cnt),
4497[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4498 CNTR_NORMAL,
4499 access_rx_hq_intr_csr_parity_err_cnt),
4500[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4501 CNTR_NORMAL,
4502 access_rx_lookup_csr_parity_err_cnt),
4503[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4504 CNTR_NORMAL,
4505 access_rx_lookup_rcv_array_cor_err_cnt),
4506[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4507 CNTR_NORMAL,
4508 access_rx_lookup_rcv_array_unc_err_cnt),
4509[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4510 0, CNTR_NORMAL,
4511 access_rx_lookup_des_part2_parity_err_cnt),
4512[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4513 0, CNTR_NORMAL,
4514 access_rx_lookup_des_part1_unc_cor_err_cnt),
4515[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4516 CNTR_NORMAL,
4517 access_rx_lookup_des_part1_unc_err_cnt),
4518[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4519 CNTR_NORMAL,
4520 access_rx_rbuf_next_free_buf_cor_err_cnt),
4521[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4522 CNTR_NORMAL,
4523 access_rx_rbuf_next_free_buf_unc_err_cnt),
4524[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4525 "RxRbufFlInitWrAddrParityErr", 0, 0,
4526 CNTR_NORMAL,
4527 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4528[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4529 0, CNTR_NORMAL,
4530 access_rx_rbuf_fl_initdone_parity_err_cnt),
4531[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4532 0, CNTR_NORMAL,
4533 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4534[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4535 CNTR_NORMAL,
4536 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4537[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4538 CNTR_NORMAL,
4539 access_rx_rbuf_empty_err_cnt),
4540[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4541 CNTR_NORMAL,
4542 access_rx_rbuf_full_err_cnt),
4543[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4544 CNTR_NORMAL,
4545 access_rbuf_bad_lookup_err_cnt),
4546[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4547 CNTR_NORMAL,
4548 access_rbuf_ctx_id_parity_err_cnt),
4549[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4550 CNTR_NORMAL,
4551 access_rbuf_csr_qeopdw_parity_err_cnt),
4552[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4553 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4554 CNTR_NORMAL,
4555 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4556[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4557 "RxRbufCsrQTlPtrParityErr", 0, 0,
4558 CNTR_NORMAL,
4559 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4560[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4561 0, CNTR_NORMAL,
4562 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4563[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4564 0, CNTR_NORMAL,
4565 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4566[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4567 0, 0, CNTR_NORMAL,
4568 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4569[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4570 0, CNTR_NORMAL,
4571 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4572[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4573 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4574 CNTR_NORMAL,
4575 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4576[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4577 0, CNTR_NORMAL,
4578 access_rx_rbuf_block_list_read_cor_err_cnt),
4579[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4580 0, CNTR_NORMAL,
4581 access_rx_rbuf_block_list_read_unc_err_cnt),
4582[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4583 CNTR_NORMAL,
4584 access_rx_rbuf_lookup_des_cor_err_cnt),
4585[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4586 CNTR_NORMAL,
4587 access_rx_rbuf_lookup_des_unc_err_cnt),
4588[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4589 "RxRbufLookupDesRegUncCorErr", 0, 0,
4590 CNTR_NORMAL,
4591 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4592[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4593 CNTR_NORMAL,
4594 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4595[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4596 CNTR_NORMAL,
4597 access_rx_rbuf_free_list_cor_err_cnt),
4598[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4599 CNTR_NORMAL,
4600 access_rx_rbuf_free_list_unc_err_cnt),
4601[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4602 CNTR_NORMAL,
4603 access_rx_rcv_fsm_encoding_err_cnt),
4604[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4605 CNTR_NORMAL,
4606 access_rx_dma_flag_cor_err_cnt),
4607[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4608 CNTR_NORMAL,
4609 access_rx_dma_flag_unc_err_cnt),
4610[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4611 CNTR_NORMAL,
4612 access_rx_dc_sop_eop_parity_err_cnt),
4613[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4614 CNTR_NORMAL,
4615 access_rx_rcv_csr_parity_err_cnt),
4616[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4617 CNTR_NORMAL,
4618 access_rx_rcv_qp_map_table_cor_err_cnt),
4619[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4620 CNTR_NORMAL,
4621 access_rx_rcv_qp_map_table_unc_err_cnt),
4622[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4623 CNTR_NORMAL,
4624 access_rx_rcv_data_cor_err_cnt),
4625[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4626 CNTR_NORMAL,
4627 access_rx_rcv_data_unc_err_cnt),
4628[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4629 CNTR_NORMAL,
4630 access_rx_rcv_hdr_cor_err_cnt),
4631[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4632 CNTR_NORMAL,
4633 access_rx_rcv_hdr_unc_err_cnt),
4634[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4635 CNTR_NORMAL,
4636 access_rx_dc_intf_parity_err_cnt),
4637[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4638 CNTR_NORMAL,
4639 access_rx_dma_csr_cor_err_cnt),
4640/* SendPioErrStatus */
4641[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4642 CNTR_NORMAL,
4643 access_pio_pec_sop_head_parity_err_cnt),
4644[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4645 CNTR_NORMAL,
4646 access_pio_pcc_sop_head_parity_err_cnt),
4647[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4648 0, 0, CNTR_NORMAL,
4649 access_pio_last_returned_cnt_parity_err_cnt),
4650[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4651 0, CNTR_NORMAL,
4652 access_pio_current_free_cnt_parity_err_cnt),
4653[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4654 CNTR_NORMAL,
4655 access_pio_reserved_31_err_cnt),
4656[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4657 CNTR_NORMAL,
4658 access_pio_reserved_30_err_cnt),
4659[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4660 CNTR_NORMAL,
4661 access_pio_ppmc_sop_len_err_cnt),
4662[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4663 CNTR_NORMAL,
4664 access_pio_ppmc_bqc_mem_parity_err_cnt),
4665[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4666 CNTR_NORMAL,
4667 access_pio_vl_fifo_parity_err_cnt),
4668[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4669 CNTR_NORMAL,
4670 access_pio_vlf_sop_parity_err_cnt),
4671[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4672 CNTR_NORMAL,
4673 access_pio_vlf_v1_len_parity_err_cnt),
4674[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4675 CNTR_NORMAL,
4676 access_pio_block_qw_count_parity_err_cnt),
4677[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4678 CNTR_NORMAL,
4679 access_pio_write_qw_valid_parity_err_cnt),
4680[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4681 CNTR_NORMAL,
4682 access_pio_state_machine_err_cnt),
4683[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4684 CNTR_NORMAL,
4685 access_pio_write_data_parity_err_cnt),
4686[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4687 CNTR_NORMAL,
4688 access_pio_host_addr_mem_cor_err_cnt),
4689[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4690 CNTR_NORMAL,
4691 access_pio_host_addr_mem_unc_err_cnt),
4692[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4693 CNTR_NORMAL,
4694 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4695[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4696 CNTR_NORMAL,
4697 access_pio_init_sm_in_err_cnt),
4698[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4699 CNTR_NORMAL,
4700 access_pio_ppmc_pbl_fifo_err_cnt),
4701[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4702 0, CNTR_NORMAL,
4703 access_pio_credit_ret_fifo_parity_err_cnt),
4704[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4705 CNTR_NORMAL,
4706 access_pio_v1_len_mem_bank1_cor_err_cnt),
4707[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4708 CNTR_NORMAL,
4709 access_pio_v1_len_mem_bank0_cor_err_cnt),
4710[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4711 CNTR_NORMAL,
4712 access_pio_v1_len_mem_bank1_unc_err_cnt),
4713[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4714 CNTR_NORMAL,
4715 access_pio_v1_len_mem_bank0_unc_err_cnt),
4716[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4717 CNTR_NORMAL,
4718 access_pio_sm_pkt_reset_parity_err_cnt),
4719[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4720 CNTR_NORMAL,
4721 access_pio_pkt_evict_fifo_parity_err_cnt),
4722[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4723 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4724 CNTR_NORMAL,
4725 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4726[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4727 CNTR_NORMAL,
4728 access_pio_sbrdctl_crrel_parity_err_cnt),
4729[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4730 CNTR_NORMAL,
4731 access_pio_pec_fifo_parity_err_cnt),
4732[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4733 CNTR_NORMAL,
4734 access_pio_pcc_fifo_parity_err_cnt),
4735[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4736 CNTR_NORMAL,
4737 access_pio_sb_mem_fifo1_err_cnt),
4738[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4739 CNTR_NORMAL,
4740 access_pio_sb_mem_fifo0_err_cnt),
4741[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4742 CNTR_NORMAL,
4743 access_pio_csr_parity_err_cnt),
4744[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4745 CNTR_NORMAL,
4746 access_pio_write_addr_parity_err_cnt),
4747[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4748 CNTR_NORMAL,
4749 access_pio_write_bad_ctxt_err_cnt),
4750/* SendDmaErrStatus */
4751[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4752 0, CNTR_NORMAL,
4753 access_sdma_pcie_req_tracking_cor_err_cnt),
4754[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4755 0, CNTR_NORMAL,
4756 access_sdma_pcie_req_tracking_unc_err_cnt),
4757[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4758 CNTR_NORMAL,
4759 access_sdma_csr_parity_err_cnt),
4760[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4761 CNTR_NORMAL,
4762 access_sdma_rpy_tag_err_cnt),
4763/* SendEgressErrStatus */
4764[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4765 CNTR_NORMAL,
4766 access_tx_read_pio_memory_csr_unc_err_cnt),
4767[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4768 0, CNTR_NORMAL,
4769 access_tx_read_sdma_memory_csr_err_cnt),
4770[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4771 CNTR_NORMAL,
4772 access_tx_egress_fifo_cor_err_cnt),
4773[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4774 CNTR_NORMAL,
4775 access_tx_read_pio_memory_cor_err_cnt),
4776[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4777 CNTR_NORMAL,
4778 access_tx_read_sdma_memory_cor_err_cnt),
4779[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4780 CNTR_NORMAL,
4781 access_tx_sb_hdr_cor_err_cnt),
4782[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4783 CNTR_NORMAL,
4784 access_tx_credit_overrun_err_cnt),
4785[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4786 CNTR_NORMAL,
4787 access_tx_launch_fifo8_cor_err_cnt),
4788[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4789 CNTR_NORMAL,
4790 access_tx_launch_fifo7_cor_err_cnt),
4791[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4792 CNTR_NORMAL,
4793 access_tx_launch_fifo6_cor_err_cnt),
4794[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4795 CNTR_NORMAL,
4796 access_tx_launch_fifo5_cor_err_cnt),
4797[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4798 CNTR_NORMAL,
4799 access_tx_launch_fifo4_cor_err_cnt),
4800[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4801 CNTR_NORMAL,
4802 access_tx_launch_fifo3_cor_err_cnt),
4803[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4804 CNTR_NORMAL,
4805 access_tx_launch_fifo2_cor_err_cnt),
4806[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4807 CNTR_NORMAL,
4808 access_tx_launch_fifo1_cor_err_cnt),
4809[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4810 CNTR_NORMAL,
4811 access_tx_launch_fifo0_cor_err_cnt),
4812[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4813 CNTR_NORMAL,
4814 access_tx_credit_return_vl_err_cnt),
4815[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4816 CNTR_NORMAL,
4817 access_tx_hcrc_insertion_err_cnt),
4818[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4819 CNTR_NORMAL,
4820 access_tx_egress_fifo_unc_err_cnt),
4821[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4822 CNTR_NORMAL,
4823 access_tx_read_pio_memory_unc_err_cnt),
4824[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4825 CNTR_NORMAL,
4826 access_tx_read_sdma_memory_unc_err_cnt),
4827[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4828 CNTR_NORMAL,
4829 access_tx_sb_hdr_unc_err_cnt),
4830[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4831 CNTR_NORMAL,
4832 access_tx_credit_return_partiy_err_cnt),
4833[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4834 0, 0, CNTR_NORMAL,
4835 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4836[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4837 0, 0, CNTR_NORMAL,
4838 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4839[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4840 0, 0, CNTR_NORMAL,
4841 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4842[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4843 0, 0, CNTR_NORMAL,
4844 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4845[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4846 0, 0, CNTR_NORMAL,
4847 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4848[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4849 0, 0, CNTR_NORMAL,
4850 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4851[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4852 0, 0, CNTR_NORMAL,
4853 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4854[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4855 0, 0, CNTR_NORMAL,
4856 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4857[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4858 0, 0, CNTR_NORMAL,
4859 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4860[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4861 0, 0, CNTR_NORMAL,
4862 access_tx_sdma15_disallowed_packet_err_cnt),
4863[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4864 0, 0, CNTR_NORMAL,
4865 access_tx_sdma14_disallowed_packet_err_cnt),
4866[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4867 0, 0, CNTR_NORMAL,
4868 access_tx_sdma13_disallowed_packet_err_cnt),
4869[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4870 0, 0, CNTR_NORMAL,
4871 access_tx_sdma12_disallowed_packet_err_cnt),
4872[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4873 0, 0, CNTR_NORMAL,
4874 access_tx_sdma11_disallowed_packet_err_cnt),
4875[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4876 0, 0, CNTR_NORMAL,
4877 access_tx_sdma10_disallowed_packet_err_cnt),
4878[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4879 0, 0, CNTR_NORMAL,
4880 access_tx_sdma9_disallowed_packet_err_cnt),
4881[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4882 0, 0, CNTR_NORMAL,
4883 access_tx_sdma8_disallowed_packet_err_cnt),
4884[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4885 0, 0, CNTR_NORMAL,
4886 access_tx_sdma7_disallowed_packet_err_cnt),
4887[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4888 0, 0, CNTR_NORMAL,
4889 access_tx_sdma6_disallowed_packet_err_cnt),
4890[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4891 0, 0, CNTR_NORMAL,
4892 access_tx_sdma5_disallowed_packet_err_cnt),
4893[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4894 0, 0, CNTR_NORMAL,
4895 access_tx_sdma4_disallowed_packet_err_cnt),
4896[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4897 0, 0, CNTR_NORMAL,
4898 access_tx_sdma3_disallowed_packet_err_cnt),
4899[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4900 0, 0, CNTR_NORMAL,
4901 access_tx_sdma2_disallowed_packet_err_cnt),
4902[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4903 0, 0, CNTR_NORMAL,
4904 access_tx_sdma1_disallowed_packet_err_cnt),
4905[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4906 0, 0, CNTR_NORMAL,
4907 access_tx_sdma0_disallowed_packet_err_cnt),
4908[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4909 CNTR_NORMAL,
4910 access_tx_config_parity_err_cnt),
4911[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4912 CNTR_NORMAL,
4913 access_tx_sbrd_ctl_csr_parity_err_cnt),
4914[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4915 CNTR_NORMAL,
4916 access_tx_launch_csr_parity_err_cnt),
4917[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4918 CNTR_NORMAL,
4919 access_tx_illegal_vl_err_cnt),
4920[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4921 "TxSbrdCtlStateMachineParityErr", 0, 0,
4922 CNTR_NORMAL,
4923 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4924[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4925 CNTR_NORMAL,
4926 access_egress_reserved_10_err_cnt),
4927[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4928 CNTR_NORMAL,
4929 access_egress_reserved_9_err_cnt),
4930[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4931 0, 0, CNTR_NORMAL,
4932 access_tx_sdma_launch_intf_parity_err_cnt),
4933[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4934 CNTR_NORMAL,
4935 access_tx_pio_launch_intf_parity_err_cnt),
4936[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4937 CNTR_NORMAL,
4938 access_egress_reserved_6_err_cnt),
4939[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4940 CNTR_NORMAL,
4941 access_tx_incorrect_link_state_err_cnt),
4942[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4943 CNTR_NORMAL,
4944 access_tx_linkdown_err_cnt),
4945[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4946 "EgressFifoUnderrunOrParityErr", 0, 0,
4947 CNTR_NORMAL,
4948 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4949[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4950 CNTR_NORMAL,
4951 access_egress_reserved_2_err_cnt),
4952[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4953 CNTR_NORMAL,
4954 access_tx_pkt_integrity_mem_unc_err_cnt),
4955[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4956 CNTR_NORMAL,
4957 access_tx_pkt_integrity_mem_cor_err_cnt),
4958/* SendErrStatus */
4959[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4960 CNTR_NORMAL,
4961 access_send_csr_write_bad_addr_err_cnt),
4962[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4963 CNTR_NORMAL,
4964 access_send_csr_read_bad_addr_err_cnt),
4965[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4966 CNTR_NORMAL,
4967 access_send_csr_parity_cnt),
4968/* SendCtxtErrStatus */
4969[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4970 CNTR_NORMAL,
4971 access_pio_write_out_of_bounds_err_cnt),
4972[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4973 CNTR_NORMAL,
4974 access_pio_write_overflow_err_cnt),
4975[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4976 0, 0, CNTR_NORMAL,
4977 access_pio_write_crosses_boundary_err_cnt),
4978[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4979 CNTR_NORMAL,
4980 access_pio_disallowed_packet_err_cnt),
4981[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4982 CNTR_NORMAL,
4983 access_pio_inconsistent_sop_err_cnt),
4984/* SendDmaEngErrStatus */
4985[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4986 0, 0, CNTR_NORMAL,
4987 access_sdma_header_request_fifo_cor_err_cnt),
4988[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4989 CNTR_NORMAL,
4990 access_sdma_header_storage_cor_err_cnt),
4991[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4992 CNTR_NORMAL,
4993 access_sdma_packet_tracking_cor_err_cnt),
4994[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4995 CNTR_NORMAL,
4996 access_sdma_assembly_cor_err_cnt),
4997[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4998 CNTR_NORMAL,
4999 access_sdma_desc_table_cor_err_cnt),
5000[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5001 0, 0, CNTR_NORMAL,
5002 access_sdma_header_request_fifo_unc_err_cnt),
5003[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5004 CNTR_NORMAL,
5005 access_sdma_header_storage_unc_err_cnt),
5006[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5007 CNTR_NORMAL,
5008 access_sdma_packet_tracking_unc_err_cnt),
5009[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5010 CNTR_NORMAL,
5011 access_sdma_assembly_unc_err_cnt),
5012[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5013 CNTR_NORMAL,
5014 access_sdma_desc_table_unc_err_cnt),
5015[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5016 CNTR_NORMAL,
5017 access_sdma_timeout_err_cnt),
5018[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5019 CNTR_NORMAL,
5020 access_sdma_header_length_err_cnt),
5021[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5022 CNTR_NORMAL,
5023 access_sdma_header_address_err_cnt),
5024[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5025 CNTR_NORMAL,
5026 access_sdma_header_select_err_cnt),
5027[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5028 CNTR_NORMAL,
5029 access_sdma_reserved_9_err_cnt),
5030[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5031 CNTR_NORMAL,
5032 access_sdma_packet_desc_overflow_err_cnt),
5033[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5034 CNTR_NORMAL,
5035 access_sdma_length_mismatch_err_cnt),
5036[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5037 CNTR_NORMAL,
5038 access_sdma_halt_err_cnt),
5039[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5040 CNTR_NORMAL,
5041 access_sdma_mem_read_err_cnt),
5042[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5043 CNTR_NORMAL,
5044 access_sdma_first_desc_err_cnt),
5045[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5046 CNTR_NORMAL,
5047 access_sdma_tail_out_of_bounds_err_cnt),
5048[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5049 CNTR_NORMAL,
5050 access_sdma_too_long_err_cnt),
5051[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5052 CNTR_NORMAL,
5053 access_sdma_gen_mismatch_err_cnt),
5054[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5055 CNTR_NORMAL,
5056 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005057};
5058
5059static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5060[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5061 CNTR_NORMAL),
5062[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5063 CNTR_NORMAL),
5064[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5065 CNTR_NORMAL),
5066[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5067 CNTR_NORMAL),
5068[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5069 CNTR_NORMAL),
5070[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5071 CNTR_NORMAL),
5072[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5073 CNTR_NORMAL),
5074[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5075[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5076[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5077[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005078 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005079[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005080 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005081[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005082 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005083[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5084[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5085[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005086 access_sw_link_dn_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005087[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005088 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05005089[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5090 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005091[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005092 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005093[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08005094 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5095 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005096[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08005097 access_xmit_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005098[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08005099 access_rcv_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005100[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5101[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5102[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5103[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5104[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5105[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5106[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5107[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5108[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5109[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5110[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5111[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5112[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5113 access_sw_cpu_rc_acks),
5114[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005115 access_sw_cpu_rc_qacks),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005116[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005117 access_sw_cpu_rc_delayed_comp),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005118[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5119[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5120[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5121[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5122[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5123[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5124[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5125[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5126[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5127[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5128[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5129[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5130[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5131[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5132[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5133[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5134[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5135[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5136[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5137[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5138[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5139[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5140[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5141[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5142[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5143[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5144[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5145[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5146[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5147[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5148[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5149[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5150[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5151[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5152[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5153[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5154[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5155[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5156[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5157[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5158[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5159[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5160[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5161[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5162[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5163[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5164[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5165[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5166[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5167[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5168[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5169[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5170[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5171[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5172[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5173[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5174[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5175[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5176[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5177[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5178[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5179[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5180[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5181[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5182[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5183[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5184[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5185[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5186[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5187[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5188[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5189[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5190[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5191[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5192[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5193[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5194[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5195[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5196[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5197[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5198};
5199
5200/* ======================================================================== */
5201
Mike Marciniszyn77241052015-07-30 15:17:43 -04005202/* return true if this is chip revision a */
5203int is_ax(struct hfi1_devdata *dd)
5204{
5205 u8 chip_rev_minor =
5206 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5207 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5208 return (chip_rev_minor & 0xf0) == 0;
5209}
5210
 5211/* return true if this is chip revision b */
5212int is_bx(struct hfi1_devdata *dd)
5213{
5214 u8 chip_rev_minor =
5215 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5216 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005217 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005218}
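/*
 * Illustrative note (derived from the two checks above, not from the
 * hardware spec): the upper nibble of the minor revision selects the
 * chip step - a minor revision of 0x0n makes is_ax() return true and
 * 0x1n makes is_bx() return true.  A-step specific workarounds in this
 * file (e.g. the CCE freeze handling below) key off is_ax().
 */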
5219
5220/*
5221 * Append string s to buffer buf. Arguments curp and len are the current
5222 * position and remaining length, respectively.
5223 *
5224 * return 0 on success, 1 on out of room
5225 */
5226static int append_str(char *buf, char **curp, int *lenp, const char *s)
5227{
5228 char *p = *curp;
5229 int len = *lenp;
5230 int result = 0; /* success */
5231 char c;
5232
 5233	/* add a comma, if this is not the first string in the buffer */
5234 if (p != buf) {
5235 if (len == 0) {
5236 result = 1; /* out of room */
5237 goto done;
5238 }
5239 *p++ = ',';
5240 len--;
5241 }
5242
5243 /* copy the string */
5244 while ((c = *s++) != 0) {
5245 if (len == 0) {
5246 result = 1; /* out of room */
5247 goto done;
5248 }
5249 *p++ = c;
5250 len--;
5251 }
5252
5253done:
5254 /* write return values */
5255 *curp = p;
5256 *lenp = len;
5257
5258 return result;
5259}
5260
5261/*
5262 * Using the given flag table, print a comma separated string into
5263 * the buffer. End in '*' if the buffer is too short.
5264 */
5265static char *flag_string(char *buf, int buf_len, u64 flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005266 struct flag_table *table, int table_size)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005267{
5268 char extra[32];
5269 char *p = buf;
5270 int len = buf_len;
5271 int no_room = 0;
5272 int i;
5273
 5274	/* make sure there are at least 2 bytes so we can form "*" */
5275 if (len < 2)
5276 return "";
5277
5278 len--; /* leave room for a nul */
5279 for (i = 0; i < table_size; i++) {
5280 if (flags & table[i].flag) {
5281 no_room = append_str(buf, &p, &len, table[i].str);
5282 if (no_room)
5283 break;
5284 flags &= ~table[i].flag;
5285 }
5286 }
5287
5288 /* any undocumented bits left? */
5289 if (!no_room && flags) {
5290 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5291 no_room = append_str(buf, &p, &len, extra);
5292 }
5293
5294 /* add * if ran out of room */
5295 if (no_room) {
5296 /* may need to back up to add space for a '*' */
5297 if (len == 0)
5298 --p;
5299 *p++ = '*';
5300 }
5301
5302 /* add final nul - space already allocated above */
5303 *p = 0;
5304 return buf;
5305}
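/*
 * Worked example (illustrative only, using a hypothetical two-entry
 * table of { 0x1: "ErrA", 0x4: "ErrC" }): flags == 0x7 formats as
 * "ErrA,ErrC,bits 0x2" - known bits by name, leftover bits as a hex
 * value, and a trailing '*' only if the buffer ran out of room.
 */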
5306
5307/* first 8 CCE error interrupt source names */
5308static const char * const cce_misc_names[] = {
5309 "CceErrInt", /* 0 */
5310 "RxeErrInt", /* 1 */
5311 "MiscErrInt", /* 2 */
5312 "Reserved3", /* 3 */
5313 "PioErrInt", /* 4 */
5314 "SDmaErrInt", /* 5 */
5315 "EgressErrInt", /* 6 */
5316 "TxeErrInt" /* 7 */
5317};
5318
5319/*
5320 * Return the miscellaneous error interrupt name.
5321 */
5322static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5323{
5324 if (source < ARRAY_SIZE(cce_misc_names))
5325 strncpy(buf, cce_misc_names[source], bsize);
5326 else
Jubin John17fb4f22016-02-14 20:21:52 -08005327 snprintf(buf, bsize, "Reserved%u",
5328 source + IS_GENERAL_ERR_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005329
5330 return buf;
5331}
5332
5333/*
5334 * Return the SDMA engine error interrupt name.
5335 */
5336static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5337{
5338 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5339 return buf;
5340}
5341
5342/*
5343 * Return the send context error interrupt name.
5344 */
5345static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5346{
5347 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5348 return buf;
5349}
5350
5351static const char * const various_names[] = {
5352 "PbcInt",
5353 "GpioAssertInt",
5354 "Qsfp1Int",
5355 "Qsfp2Int",
5356 "TCritInt"
5357};
5358
5359/*
5360 * Return the various interrupt name.
5361 */
5362static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5363{
5364 if (source < ARRAY_SIZE(various_names))
5365 strncpy(buf, various_names[source], bsize);
5366 else
Jubin John8638b772016-02-14 20:19:24 -08005367 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005368 return buf;
5369}
5370
5371/*
5372 * Return the DC interrupt name.
5373 */
5374static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5375{
5376 static const char * const dc_int_names[] = {
5377 "common",
5378 "lcb",
5379 "8051",
5380 "lbm" /* local block merge */
5381 };
5382
5383 if (source < ARRAY_SIZE(dc_int_names))
5384 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5385 else
5386 snprintf(buf, bsize, "DCInt%u", source);
5387 return buf;
5388}
5389
5390static const char * const sdma_int_names[] = {
5391 "SDmaInt",
5392 "SdmaIdleInt",
5393 "SdmaProgressInt",
5394};
5395
5396/*
5397 * Return the SDMA engine interrupt name.
5398 */
5399static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5400{
5401 /* what interrupt */
5402 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5403 /* which engine */
5404 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5405
5406 if (likely(what < 3))
5407 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5408 else
5409 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5410 return buf;
5411}
5412
5413/*
5414 * Return the receive available interrupt name.
5415 */
5416static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5417{
5418 snprintf(buf, bsize, "RcvAvailInt%u", source);
5419 return buf;
5420}
5421
5422/*
5423 * Return the receive urgent interrupt name.
5424 */
5425static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5426{
5427 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5428 return buf;
5429}
5430
5431/*
5432 * Return the send credit interrupt name.
5433 */
5434static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5435{
5436 snprintf(buf, bsize, "SendCreditInt%u", source);
5437 return buf;
5438}
5439
5440/*
5441 * Return the reserved interrupt name.
5442 */
5443static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5444{
5445 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5446 return buf;
5447}
5448
5449static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5450{
5451 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005452 cce_err_status_flags,
5453 ARRAY_SIZE(cce_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005454}
5455
5456static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5457{
5458 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005459 rxe_err_status_flags,
5460 ARRAY_SIZE(rxe_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005461}
5462
5463static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5464{
5465 return flag_string(buf, buf_len, flags, misc_err_status_flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005466 ARRAY_SIZE(misc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005467}
5468
5469static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5470{
5471 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005472 pio_err_status_flags,
5473 ARRAY_SIZE(pio_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005474}
5475
5476static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5477{
5478 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005479 sdma_err_status_flags,
5480 ARRAY_SIZE(sdma_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005481}
5482
5483static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5484{
5485 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005486 egress_err_status_flags,
5487 ARRAY_SIZE(egress_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005488}
5489
5490static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5491{
5492 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005493 egress_err_info_flags,
5494 ARRAY_SIZE(egress_err_info_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005495}
5496
5497static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5498{
5499 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005500 send_err_status_flags,
5501 ARRAY_SIZE(send_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005502}
5503
5504static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5505{
5506 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005507 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005508
5509 /*
 5510	 * For most of these errors, there is nothing that can be done except
5511 * report or record it.
5512 */
5513 dd_dev_info(dd, "CCE Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005514 cce_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005515
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005516 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5517 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005518 /* this error requires a manual drop into SPC freeze mode */
5519 /* then a fix up */
5520 start_freeze_handling(dd->pport, FREEZE_SELF);
5521 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005522
5523 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5524 if (reg & (1ull << i)) {
5525 incr_cntr64(&dd->cce_err_status_cnt[i]);
5526 /* maintain a counter over all cce_err_status errors */
5527 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5528 }
5529 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005530}
5531
5532/*
5533 * Check counters for receive errors that do not have an interrupt
5534 * associated with them.
5535 */
5536#define RCVERR_CHECK_TIME 10
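/*
 * Timer callback summary (descriptive, not normative): every
 * RCVERR_CHECK_TIME seconds the current C_RCV_OVF count is compared
 * with the last snapshot; if it grew and port_error_action has
 * OPA_PI_MASK_EX_BUFFER_OVERRUN set, the link is bounced via
 * link_bounce_work.  The timer then re-arms itself.
 */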
5537static void update_rcverr_timer(unsigned long opaque)
5538{
5539 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5540 struct hfi1_pportdata *ppd = dd->pport;
5541 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5542
5543 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
Jubin John17fb4f22016-02-14 20:21:52 -08005544 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005545 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
Jubin John17fb4f22016-02-14 20:21:52 -08005546 set_link_down_reason(
5547 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5548 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
Sebastian Sanchez71d47002017-07-29 08:43:49 -07005549 queue_work(ppd->link_wq, &ppd->link_bounce_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005550 }
Jubin John50e5dcb2016-02-14 20:19:41 -08005551 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005552
5553 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5554}
5555
5556static int init_rcverr(struct hfi1_devdata *dd)
5557{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305558 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005559 /* Assume the hardware counter has been reset */
5560 dd->rcv_ovfl_cnt = 0;
5561 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5562}
5563
5564static void free_rcverr(struct hfi1_devdata *dd)
5565{
5566 if (dd->rcverr_timer.data)
5567 del_timer_sync(&dd->rcverr_timer);
5568 dd->rcverr_timer.data = 0;
5569}
5570
5571static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5572{
5573 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005574 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005575
5576 dd_dev_info(dd, "Receive Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005577 rxe_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005578
5579 if (reg & ALL_RXE_FREEZE_ERR) {
5580 int flags = 0;
5581
5582 /*
5583 * Freeze mode recovery is disabled for the errors
5584 * in RXE_FREEZE_ABORT_MASK
5585 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005586 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005587 flags = FREEZE_ABORT;
5588
5589 start_freeze_handling(dd->pport, flags);
5590 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005591
5592 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5593 if (reg & (1ull << i))
5594 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5595 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005596}
5597
5598static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5599{
5600 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005601 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005602
5603 dd_dev_info(dd, "Misc Error: %s",
Jubin John17fb4f22016-02-14 20:21:52 -08005604 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005605 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5606 if (reg & (1ull << i))
5607 incr_cntr64(&dd->misc_err_status_cnt[i]);
5608 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005609}
5610
5611static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5612{
5613 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005614 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005615
5616 dd_dev_info(dd, "PIO Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005617 pio_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005618
5619 if (reg & ALL_PIO_FREEZE_ERR)
5620 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005621
5622 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5623 if (reg & (1ull << i))
5624 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5625 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005626}
5627
5628static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5629{
5630 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005631 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005632
5633 dd_dev_info(dd, "SDMA Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005634 sdma_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005635
5636 if (reg & ALL_SDMA_FREEZE_ERR)
5637 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005638
5639 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5640 if (reg & (1ull << i))
5641 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5642 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005643}
5644
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005645static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5646{
5647 incr_cntr64(&ppd->port_xmit_discards);
5648}
5649
Mike Marciniszyn77241052015-07-30 15:17:43 -04005650static void count_port_inactive(struct hfi1_devdata *dd)
5651{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005652 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005653}
5654
5655/*
5656 * We have had a "disallowed packet" error during egress. Determine the
5657 * integrity check which failed, and update relevant error counter, etc.
5658 *
5659 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5660 * bit of state per integrity check, and so we can miss the reason for an
5661 * egress error if more than one packet fails the same integrity check
5662 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5663 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005664static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5665 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005666{
5667 struct hfi1_pportdata *ppd = dd->pport;
5668 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5669 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5670 char buf[96];
5671
5672 /* clear down all observed info as quickly as possible after read */
5673 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5674
5675 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005676 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5677 info, egress_err_info_string(buf, sizeof(buf), info), src);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005678
5679 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005680 if (info & PORT_DISCARD_EGRESS_ERRS) {
5681 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005682
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005683 /*
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005684 * Count all applicable bits as individual errors and
5685 * attribute them to the packet that triggered this handler.
5686 * This may not be completely accurate due to limitations
5687 * on the available hardware error information. There is
5688 * a single information register and any number of error
5689 * packets may have occurred and contributed to it before
5690 * this routine is called. This means that:
5691 * a) If multiple packets with the same error occur before
5692 * this routine is called, earlier packets are missed.
5693 * There is only a single bit for each error type.
5694 * b) Errors may not be attributed to the correct VL.
5695 * The driver is attributing all bits in the info register
5696 * to the packet that triggered this call, but bits
5697 * could be an accumulation of different packets with
5698 * different VLs.
5699 * c) A single error packet may have multiple counts attached
5700 * to it. There is no way for the driver to know if
5701 * multiple bits set in the info register are due to a
5702 * single packet or multiple packets. The driver assumes
5703 * multiple packets.
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005704 */
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005705 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005706 for (i = 0; i < weight; i++) {
5707 __count_port_discards(ppd);
5708 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5709 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5710 else if (vl == 15)
5711 incr_cntr64(&ppd->port_xmit_discards_vl
5712 [C_VL_15]);
5713 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005714 }
5715}
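/*
 * Counting example (illustrative): if the info register reads back with
 * three PORT_DISCARD_EGRESS_ERRS bits set, hweight64() yields 3, so the
 * loop above charges three discards to the port and, when the VL could
 * be resolved, three to that VL's (or VL15's) per-VL discard counter.
 */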
5716
5717/*
5718 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5719 * register. Does it represent a 'port inactive' error?
5720 */
5721static inline int port_inactive_err(u64 posn)
5722{
5723 return (posn >= SEES(TX_LINKDOWN) &&
5724 posn <= SEES(TX_INCORRECT_LINK_STATE));
5725}
5726
5727/*
5728 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5729 * register. Does it represent a 'disallowed packet' error?
5730 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005731static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005732{
5733 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5734 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5735}
5736
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005737/*
5738 * Input value is a bit position of one of the SDMA engine disallowed
5739 * packet errors. Return which engine. Use of this must be guarded by
5740 * disallowed_pkt_err().
5741 */
5742static inline int disallowed_pkt_engine(int posn)
5743{
5744 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5745}
5746
5747/*
 5748 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5749 * be done.
5750 */
5751static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5752{
5753 struct sdma_vl_map *m;
5754 int vl;
5755
5756 /* range check */
5757 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5758 return -1;
5759
5760 rcu_read_lock();
5761 m = rcu_dereference(dd->sdma_map);
5762 vl = m->engine_to_vl[engine];
5763 rcu_read_unlock();
5764
5765 return vl;
5766}
5767
5768/*
 5769 * Translate the send context (software index) into a VL. Return -1 if the
5770 * translation cannot be done.
5771 */
5772static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5773{
5774 struct send_context_info *sci;
5775 struct send_context *sc;
5776 int i;
5777
5778 sci = &dd->send_contexts[sw_index];
5779
5780 /* there is no information for user (PSM) and ack contexts */
Jianxin Xiong44306f12016-04-12 11:30:28 -07005781 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005782 return -1;
5783
5784 sc = sci->sc;
5785 if (!sc)
5786 return -1;
5787 if (dd->vld[15].sc == sc)
5788 return 15;
5789 for (i = 0; i < num_vls; i++)
5790 if (dd->vld[i].sc == sc)
5791 return i;
5792
5793 return -1;
5794}
5795
Mike Marciniszyn77241052015-07-30 15:17:43 -04005796static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5797{
5798 u64 reg_copy = reg, handled = 0;
5799 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005800 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005801
5802 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5803 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005804 else if (is_ax(dd) &&
5805 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5806 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005807 start_freeze_handling(dd->pport, 0);
5808
5809 while (reg_copy) {
5810 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005811 /* fls64() returns a 1-based offset, we want it zero based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005812 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005813 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005814
5815 if (port_inactive_err(shift)) {
5816 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005817 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005818 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005819 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5820
5821 handle_send_egress_err_info(dd, vl);
5822 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005823 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005824 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005825 }
5826
5827 reg &= ~handled;
5828
5829 if (reg)
5830 dd_dev_info(dd, "Egress Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005831 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005832
5833 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5834 if (reg & (1ull << i))
5835 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5836 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005837}
5838
5839static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5840{
5841 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005842 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005843
5844 dd_dev_info(dd, "Send Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005845 send_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005846
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005847 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5848 if (reg & (1ull << i))
5849 incr_cntr64(&dd->send_err_status_cnt[i]);
5850 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005851}
5852
5853/*
5854 * The maximum number of times the error clear down will loop before
5855 * blocking a repeating error. This value is arbitrary.
5856 */
5857#define MAX_CLEAR_COUNT 20
5858
5859/*
5860 * Clear and handle an error register. All error interrupts are funneled
5861 * through here to have a central location to correctly handle single-
5862 * or multi-shot errors.
5863 *
5864 * For non per-context registers, call this routine with a context value
5865 * of 0 so the per-context offset is zero.
5866 *
5867 * If the handler loops too many times, assume that something is wrong
5868 * and can't be fixed, so mask the error bits.
5869 */
5870static void interrupt_clear_down(struct hfi1_devdata *dd,
5871 u32 context,
5872 const struct err_reg_info *eri)
5873{
5874 u64 reg;
5875 u32 count;
5876
5877 /* read in a loop until no more errors are seen */
5878 count = 0;
5879 while (1) {
5880 reg = read_kctxt_csr(dd, context, eri->status);
5881 if (reg == 0)
5882 break;
5883 write_kctxt_csr(dd, context, eri->clear, reg);
5884 if (likely(eri->handler))
5885 eri->handler(dd, context, reg);
5886 count++;
5887 if (count > MAX_CLEAR_COUNT) {
5888 u64 mask;
5889
5890 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005891 eri->desc, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005892 /*
5893 * Read-modify-write so any other masked bits
5894 * remain masked.
5895 */
5896 mask = read_kctxt_csr(dd, context, eri->mask);
5897 mask &= ~reg;
5898 write_kctxt_csr(dd, context, eri->mask, mask);
5899 break;
5900 }
5901 }
5902}
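/*
 * Behavior sketch (descriptive): each status read/clear/handle pass is
 * one loop iteration; after MAX_CLEAR_COUNT iterations the offending
 * bits are masked with a read-modify-write of eri->mask so previously
 * masked bits stay masked and the repeating interrupt is silenced.
 */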
5903
5904/*
5905 * CCE block "misc" interrupt. Source is < 16.
5906 */
5907static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5908{
5909 const struct err_reg_info *eri = &misc_errs[source];
5910
5911 if (eri->handler) {
5912 interrupt_clear_down(dd, 0, eri);
5913 } else {
5914 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005915 source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005916 }
5917}
5918
5919static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5920{
5921 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005922 sc_err_status_flags,
5923 ARRAY_SIZE(sc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005924}
5925
5926/*
5927 * Send context error interrupt. Source (hw_context) is < 160.
5928 *
5929 * All send context errors cause the send context to halt. The normal
5930 * clear-down mechanism cannot be used because we cannot clear the
5931 * error bits until several other long-running items are done first.
5932 * This is OK because with the context halted, nothing else is going
5933 * to happen on it anyway.
5934 */
5935static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5936 unsigned int hw_context)
5937{
5938 struct send_context_info *sci;
5939 struct send_context *sc;
5940 char flags[96];
5941 u64 status;
5942 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005943 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005944
5945 sw_index = dd->hw_to_sw[hw_context];
5946 if (sw_index >= dd->num_send_contexts) {
5947 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005948 "out of range sw index %u for send context %u\n",
5949 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005950 return;
5951 }
5952 sci = &dd->send_contexts[sw_index];
5953 sc = sci->sc;
5954 if (!sc) {
5955 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -08005956 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005957 return;
5958 }
5959
5960 /* tell the software that a halt has begun */
5961 sc_stop(sc, SCF_HALTED);
5962
5963 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5964
5965 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
Jubin John17fb4f22016-02-14 20:21:52 -08005966 send_context_err_status_string(flags, sizeof(flags),
5967 status));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005968
5969 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005970 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005971
5972 /*
5973 * Automatically restart halted kernel contexts out of interrupt
5974 * context. User contexts must ask the driver to restart the context.
5975 */
5976 if (sc->type != SC_USER)
5977 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005978
5979 /*
5980 * Update the counters for the corresponding status bits.
5981 * Note that these particular counters are aggregated over all
5982 * 160 contexts.
5983 */
5984 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5985 if (status & (1ull << i))
5986 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5987 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005988}
5989
5990static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5991 unsigned int source, u64 status)
5992{
5993 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005994 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005995
5996 sde = &dd->per_sdma[source];
5997#ifdef CONFIG_SDMA_VERBOSITY
5998 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5999 slashstrip(__FILE__), __LINE__, __func__);
6000 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6001 sde->this_idx, source, (unsigned long long)status);
6002#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05006003 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006004 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05006005
6006 /*
6007 * Update the counters for the corresponding status bits.
6008 * Note that these particular counters are aggregated over
6009 * all 16 DMA engines.
6010 */
6011 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6012 if (status & (1ull << i))
6013 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6014 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006015}
6016
6017/*
6018 * CCE block SDMA error interrupt. Source is < 16.
6019 */
6020static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6021{
6022#ifdef CONFIG_SDMA_VERBOSITY
6023 struct sdma_engine *sde = &dd->per_sdma[source];
6024
6025 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6026 slashstrip(__FILE__), __LINE__, __func__);
6027 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6028 source);
6029 sdma_dumpstate(sde);
6030#endif
6031 interrupt_clear_down(dd, source, &sdma_eng_err);
6032}
6033
6034/*
6035 * CCE block "various" interrupt. Source is < 8.
6036 */
6037static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6038{
6039 const struct err_reg_info *eri = &various_err[source];
6040
6041 /*
6042 * TCritInt cannot go through interrupt_clear_down()
6043 * because it is not a second tier interrupt. The handler
6044 * should be called directly.
6045 */
6046 if (source == TCRIT_INT_SOURCE)
6047 handle_temp_err(dd);
6048 else if (eri->handler)
6049 interrupt_clear_down(dd, 0, eri);
6050 else
6051 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006052 "%s: Unimplemented/reserved interrupt %d\n",
6053 __func__, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006054}
6055
6056static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6057{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006058 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006059 struct hfi1_pportdata *ppd = dd->pport;
6060 unsigned long flags;
6061 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6062
6063 if (reg & QSFP_HFI0_MODPRST_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006064 if (!qsfp_mod_present(ppd)) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006065 dd_dev_info(dd, "%s: QSFP module removed\n",
6066 __func__);
6067
Mike Marciniszyn77241052015-07-30 15:17:43 -04006068 ppd->driver_link_ready = 0;
6069 /*
6070 * Cable removed, reset all our information about the
6071 * cache and cable capabilities
6072 */
6073
6074 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6075 /*
6076 * We don't set cache_refresh_required here as we expect
6077 * an interrupt when a cable is inserted
6078 */
6079 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006080 ppd->qsfp_info.reset_needed = 0;
6081 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006082 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006083 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006084 /* Invert the ModPresent pin now to detect plug-in */
6085 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6086 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006087
6088 if ((ppd->offline_disabled_reason >
6089 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006090 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08006091 (ppd->offline_disabled_reason ==
6092 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6093 ppd->offline_disabled_reason =
6094 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006095 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006096
Mike Marciniszyn77241052015-07-30 15:17:43 -04006097 if (ppd->host_link_state == HLS_DN_POLL) {
6098 /*
6099 * The link is still in POLL. This means
6100 * that the normal link down processing
6101 * will not happen. We have to do it here
6102 * before turning the DC off.
6103 */
Sebastian Sanchez71d47002017-07-29 08:43:49 -07006104 queue_work(ppd->link_wq, &ppd->link_down_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006105 }
6106 } else {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006107 dd_dev_info(dd, "%s: QSFP module inserted\n",
6108 __func__);
6109
Mike Marciniszyn77241052015-07-30 15:17:43 -04006110 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6111 ppd->qsfp_info.cache_valid = 0;
6112 ppd->qsfp_info.cache_refresh_required = 1;
6113 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006114 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006115
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006116 /*
6117 * Stop inversion of ModPresent pin to detect
6118 * removal of the cable
6119 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006120 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006121 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6122 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6123
6124 ppd->offline_disabled_reason =
6125 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006126 }
6127 }
6128
6129 if (reg & QSFP_HFI0_INT_N) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006130 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006131 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006132 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6133 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006134 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6135 }
6136
6137 /* Schedule the QSFP work only if there is a cable attached. */
6138 if (qsfp_mod_present(ppd))
Sebastian Sanchez71d47002017-07-29 08:43:49 -07006139 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006140}
6141
6142static int request_host_lcb_access(struct hfi1_devdata *dd)
6143{
6144 int ret;
6145
6146 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006147 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6148 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006149 if (ret != HCMD_SUCCESS) {
6150 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006151 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006152 }
6153 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6154}
6155
6156static int request_8051_lcb_access(struct hfi1_devdata *dd)
6157{
6158 int ret;
6159
6160 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006161 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6162 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006163 if (ret != HCMD_SUCCESS) {
6164 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006165 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006166 }
6167 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6168}
6169
6170/*
6171 * Set the LCB selector - allow host access. The DCC selector always
6172 * points to the host.
6173 */
6174static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6175{
6176 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006177 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6178 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006179}
6180
6181/*
6182 * Clear the LCB selector - allow 8051 access. The DCC selector always
6183 * points to the host.
6184 */
6185static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6186{
6187 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006188 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006189}
6190
6191/*
6192 * Acquire LCB access from the 8051. If the host already has access,
6193 * just increment a counter. Otherwise, inform the 8051 that the
6194 * host is taking access.
6195 *
6196 * Returns:
6197 * 0 on success
6198 * -EBUSY if the 8051 has control and cannot be disturbed
6199 * -errno if unable to acquire access from the 8051
6200 */
6201int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6202{
6203 struct hfi1_pportdata *ppd = dd->pport;
6204 int ret = 0;
6205
6206 /*
6207 * Use the host link state lock so the operation of this routine
6208 * { link state check, selector change, count increment } can occur
6209 * as a unit against a link state change. Otherwise there is a
6210 * race between the state change and the count increment.
6211 */
6212 if (sleep_ok) {
6213 mutex_lock(&ppd->hls_lock);
6214 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006215 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006216 udelay(1);
6217 }
6218
6219 /* this access is valid only when the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07006220 if (ppd->host_link_state & HLS_DOWN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006221 dd_dev_info(dd, "%s: link state %s not up\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006222 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006223 ret = -EBUSY;
6224 goto done;
6225 }
6226
6227 if (dd->lcb_access_count == 0) {
6228 ret = request_host_lcb_access(dd);
6229 if (ret) {
6230 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006231 "%s: unable to acquire LCB access, err %d\n",
6232 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006233 goto done;
6234 }
6235 set_host_lcb_access(dd);
6236 }
6237 dd->lcb_access_count++;
6238done:
6239 mutex_unlock(&ppd->hls_lock);
6240 return ret;
6241}
6242
6243/*
6244 * Release LCB access by decrementing the use count. If the count is moving
6245 * from 1 to 0, inform 8051 that it has control back.
6246 *
6247 * Returns:
6248 * 0 on success
6249 * -errno if unable to release access to the 8051
6250 */
6251int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6252{
6253 int ret = 0;
6254
6255 /*
6256 * Use the host link state lock because the acquire needed it.
6257 * Here, we only need to keep { selector change, count decrement }
6258 * as a unit.
6259 */
6260 if (sleep_ok) {
6261 mutex_lock(&dd->pport->hls_lock);
6262 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006263 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006264 udelay(1);
6265 }
6266
6267 if (dd->lcb_access_count == 0) {
6268 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006269 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006270 goto done;
6271 }
6272
6273 if (dd->lcb_access_count == 1) {
6274 set_8051_lcb_access(dd);
6275 ret = request_8051_lcb_access(dd);
6276 if (ret) {
6277 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006278 "%s: unable to release LCB access, err %d\n",
6279 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006280 /* restore host access if the grant didn't work */
6281 set_host_lcb_access(dd);
6282 goto done;
6283 }
6284 }
6285 dd->lcb_access_count--;
6286done:
6287 mutex_unlock(&dd->pport->hls_lock);
6288 return ret;
6289}
6290
6291/*
6292 * Initialize LCB access variables and state. Called during driver load,
6293 * after most of the initialization is finished.
6294 *
6295 * The DC default is LCB access on for the host. The driver defaults to
6296 * leaving access to the 8051. Assign access now - this constrains the call
6297 * to this routine to be after all LCB set-up is done. In particular, after
6298 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6299 */
6300static void init_lcb_access(struct hfi1_devdata *dd)
6301{
6302 dd->lcb_access_count = 0;
6303}
6304
6305/*
6306 * Write a response back to a 8051 request.
6307 */
6308static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6309{
6310 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
Jubin John17fb4f22016-02-14 20:21:52 -08006311 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6312 (u64)return_code <<
6313 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6314 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006315}
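/*
 * Illustrative use, matching the callers below: hreq_response(dd,
 * HREQ_SUCCESS, 0) acknowledges an 8051 request with a success code and
 * no response data, while hreq_response(dd, HREQ_NOT_SUPPORTED, 0)
 * rejects an unsupported request type.
 */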
6316
6317/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006318 * Handle host requests from the 8051.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006319 */
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006320static void handle_8051_request(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006321{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006322 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006323 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006324 u16 data = 0;
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006325 u8 type;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006326
6327 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6328 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6329 return; /* no request */
6330
6331 /* zero out COMPLETED so the response is seen */
6332 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6333
6334 /* extract request details */
6335 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6336 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6337 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6338 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6339
6340 switch (type) {
6341 case HREQ_LOAD_CONFIG:
6342 case HREQ_SAVE_CONFIG:
6343 case HREQ_READ_CONFIG:
6344 case HREQ_SET_TX_EQ_ABS:
6345 case HREQ_SET_TX_EQ_REL:
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006346 case HREQ_ENABLE:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006347 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006348 type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006349 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6350 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006351 case HREQ_CONFIG_DONE:
6352 hreq_response(dd, HREQ_SUCCESS, 0);
6353 break;
6354
6355 case HREQ_INTERFACE_TEST:
6356 hreq_response(dd, HREQ_SUCCESS, data);
6357 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006358 default:
6359 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6360 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6361 break;
6362 }
6363}
6364
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006365/*
6366 * Set up allocation unit value.
6367 */
6368void set_up_vau(struct hfi1_devdata *dd, u8 vau)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006369{
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006370 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6371
6372 /* do not modify other values in the register */
6373 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6374 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6375 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006376}
6377
6378/*
6379 * Set up initial VL15 credits of the remote. Assumes the rest of
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006380 * the CM credit registers are zero from a previous global or credit reset.
6381 * Shared limit for VL15 will always be 0.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006382 */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006383void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006384{
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006385 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6386
6387 /* set initial values for total and shared credit limit */
6388 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6389 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6390
6391 /*
6392 * Set total limit to be equal to VL15 credits.
6393 * Leave shared limit at 0.
6394 */
6395 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6396 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006397
Dennis Dalessandroeacc8302016-10-17 04:19:52 -07006398 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6399 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006400}
6401
6402/*
6403 * Zero all credit details from the previous connection and
6404 * reset the CM manager's internal counters.
6405 */
6406void reset_link_credits(struct hfi1_devdata *dd)
6407{
6408 int i;
6409
6410 /* remove all previous VL credit limits */
6411 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006412 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006413 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006414 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006415 /* reset the CM block */
6416 pio_send_control(dd, PSC_CM_RESET);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006417 /* reset cached value */
6418 dd->vl15buf_cached = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006419}
6420
6421/* convert a vCU to a CU */
6422static u32 vcu_to_cu(u8 vcu)
6423{
6424 return 1 << vcu;
6425}
6426
6427/* convert a CU to a vCU */
6428static u8 cu_to_vcu(u32 cu)
6429{
6430 return ilog2(cu);
6431}
6432
6433/* convert a vAU to an AU */
6434static u32 vau_to_au(u8 vau)
6435{
6436 return 8 * (1 << vau);
6437}
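/*
 * Illustrative values, derived from the conversion formulas above:
 * vAU 0, 1 and 3 correspond to allocation units of 8, 16 and 64 bytes,
 * and vCU 0 and 1 correspond to credit units of 1 and 2.
 */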
6438
6439static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6440{
6441 ppd->sm_trap_qp = 0x0;
6442 ppd->sa_qp = 0x1;
6443}
6444
6445/*
6446 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6447 */
6448static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6449{
6450 u64 reg;
6451
6452 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6453 write_csr(dd, DC_LCB_CFG_RUN, 0);
6454 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6455 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
Jubin John17fb4f22016-02-14 20:21:52 -08006456 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006457 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6458 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6459 reg = read_csr(dd, DCC_CFG_RESET);
Jubin John17fb4f22016-02-14 20:21:52 -08006460 write_csr(dd, DCC_CFG_RESET, reg |
6461 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6462 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006463 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006464 if (!abort) {
6465 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6466 write_csr(dd, DCC_CFG_RESET, reg);
6467 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6468 }
6469}
6470
6471/*
6472 * This routine should be called after the link has been transitioned to
6473 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6474 * reset).
6475 *
6476 * The expectation is that the caller of this routine would have taken
6477 * care of properly transitioning the link into the correct state.
Tadeusz Struk22546b72017-04-28 10:40:02 -07006478 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6479 * before calling this function.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006480 */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006481static void _dc_shutdown(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006482{
Tadeusz Struk22546b72017-04-28 10:40:02 -07006483 lockdep_assert_held(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006484
Tadeusz Struk22546b72017-04-28 10:40:02 -07006485 if (dd->dc_shutdown)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006486 return;
Tadeusz Struk22546b72017-04-28 10:40:02 -07006487
Mike Marciniszyn77241052015-07-30 15:17:43 -04006488 dd->dc_shutdown = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006489 /* Shutdown the LCB */
6490 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006491 /*
6492 * Going to OFFLINE would have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006493 * SerDes into reset already. Just need to shut down the 8051
Jubin John4d114fd2016-02-14 20:21:43 -08006494 * itself.
6495 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006496 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6497}
6498
Tadeusz Struk22546b72017-04-28 10:40:02 -07006499static void dc_shutdown(struct hfi1_devdata *dd)
6500{
6501 mutex_lock(&dd->dc8051_lock);
6502 _dc_shutdown(dd);
6503 mutex_unlock(&dd->dc8051_lock);
6504}
6505
Jubin John4d114fd2016-02-14 20:21:43 -08006506/*
6507 * Calling this after the DC has been brought out of reset should not
6508 * do any damage.
Tadeusz Struk22546b72017-04-28 10:40:02 -07006509 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6510 * before calling this function.
Jubin John4d114fd2016-02-14 20:21:43 -08006511 */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006512static void _dc_start(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006513{
Tadeusz Struk22546b72017-04-28 10:40:02 -07006514 lockdep_assert_held(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006515
Mike Marciniszyn77241052015-07-30 15:17:43 -04006516 if (!dd->dc_shutdown)
Tadeusz Struk22546b72017-04-28 10:40:02 -07006517 return;
6518
Mike Marciniszyn77241052015-07-30 15:17:43 -04006519 /* Take the 8051 out of reset */
6520 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6521 /* Wait until 8051 is ready */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006522 if (wait_fm_ready(dd, TIMEOUT_8051_START))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006523 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006524 __func__);
Tadeusz Struk22546b72017-04-28 10:40:02 -07006525
Mike Marciniszyn77241052015-07-30 15:17:43 -04006526 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6527 write_csr(dd, DCC_CFG_RESET, 0x10);
6528 /* lcb_shutdown() with abort=1 does not restore these */
6529 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006530 dd->dc_shutdown = 0;
Tadeusz Struk22546b72017-04-28 10:40:02 -07006531}
6532
6533static void dc_start(struct hfi1_devdata *dd)
6534{
6535 mutex_lock(&dd->dc8051_lock);
6536 _dc_start(dd);
6537 mutex_unlock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006538}
6539
6540/*
6541 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6542 */
6543static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6544{
6545 u64 rx_radr, tx_radr;
6546 u32 version;
6547
6548 if (dd->icode != ICODE_FPGA_EMULATION)
6549 return;
6550
6551 /*
6552 * These LCB defaults on emulator _s are good, nothing to do here:
6553 * LCB_CFG_TX_FIFOS_RADR
6554 * LCB_CFG_RX_FIFOS_RADR
6555 * LCB_CFG_LN_DCLK
6556 * LCB_CFG_IGNORE_LOST_RCLK
6557 */
6558 if (is_emulator_s(dd))
6559 return;
6560 /* else this is _p */
6561
6562 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006563 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006564 version = 0x2d; /* all B0 use 0x2d or higher settings */
6565
6566 if (version <= 0x12) {
6567 /* release 0x12 and below */
6568
6569 /*
6570 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6571 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6572 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6573 */
6574 rx_radr =
6575 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6576 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6577 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6578 /*
6579 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6580 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6581 */
6582 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6583 } else if (version <= 0x18) {
6584 /* release 0x13 up to 0x18 */
6585 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6586 rx_radr =
6587 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6588 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6589 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6590 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6591 } else if (version == 0x19) {
6592 /* release 0x19 */
6593 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6594 rx_radr =
6595 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6596 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6597 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6598 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6599 } else if (version == 0x1a) {
6600 /* release 0x1a */
6601 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6602 rx_radr =
6603 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6604 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6605 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6606 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6607 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6608 } else {
6609 /* release 0x1b and higher */
6610 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6611 rx_radr =
6612 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6613 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6614 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6615 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6616 }
6617
6618 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6619 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6620 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
Jubin John17fb4f22016-02-14 20:21:52 -08006621 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006622 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6623}
6624
6625/*
6626 * Handle a SMA idle message
6627 *
6628 * This is a work-queue function outside of the interrupt.
6629 */
6630void handle_sma_message(struct work_struct *work)
6631{
6632 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6633 sma_message_work);
6634 struct hfi1_devdata *dd = ppd->dd;
6635 u64 msg;
6636 int ret;
6637
Jubin John4d114fd2016-02-14 20:21:43 -08006638 /*
6639 * msg is bytes 1-4 of the 40-bit idle message - the command code
6640 * is stripped off
6641 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006642 ret = read_idle_sma(dd, &msg);
6643 if (ret)
6644 return;
6645 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6646 /*
6647 * React to the SMA message. Byte[1] (0 for us) is the command.
6648 */
6649 switch (msg & 0xff) {
6650 case SMA_IDLE_ARM:
6651 /*
6652 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6653 * State Transitions
6654 *
6655 * Only expected in INIT or ARMED, discard otherwise.
6656 */
6657 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6658 ppd->neighbor_normal = 1;
6659 break;
6660 case SMA_IDLE_ACTIVE:
6661 /*
6662 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6663 * State Transitions
6664 *
6665 * Can activate the node. Discard otherwise.
6666 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006667 if (ppd->host_link_state == HLS_UP_ARMED &&
6668 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006669 ppd->neighbor_normal = 1;
6670 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6671 if (ret)
6672 dd_dev_err(
6673 dd,
6674 "%s: received Active SMA idle message, couldn't set link to Active\n",
6675 __func__);
6676 }
6677 break;
6678 default:
6679 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006680 "%s: received unexpected SMA idle message 0x%llx\n",
6681 __func__, msg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006682 break;
6683 }
6684}
6685
6686static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6687{
6688 u64 rcvctrl;
6689 unsigned long flags;
6690
6691 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6692 rcvctrl = read_csr(dd, RCV_CTRL);
6693 rcvctrl |= add;
6694 rcvctrl &= ~clear;
6695 write_csr(dd, RCV_CTRL, rcvctrl);
6696 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6697}
6698
6699static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6700{
6701 adjust_rcvctrl(dd, add, 0);
6702}
6703
6704static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6705{
6706 adjust_rcvctrl(dd, 0, clear);
6707}
6708
6709/*
6710 * Called from all interrupt handlers to start handling an SPC freeze.
6711 */
6712void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6713{
6714 struct hfi1_devdata *dd = ppd->dd;
6715 struct send_context *sc;
6716 int i;
6717
6718 if (flags & FREEZE_SELF)
6719 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6720
6721 /* enter frozen mode */
6722 dd->flags |= HFI1_FROZEN;
6723
6724 /* notify all SDMA engines that they are going into a freeze */
6725 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6726
6727 /* do halt pre-handling on all enabled send contexts */
6728 for (i = 0; i < dd->num_send_contexts; i++) {
6729 sc = dd->send_contexts[i].sc;
6730 if (sc && (sc->flags & SCF_ENABLED))
6731 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6732 }
6733
6734 /* Send contexts are frozen. Notify user space */
6735 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6736
6737 if (flags & FREEZE_ABORT) {
6738 dd_dev_err(dd,
6739 "Aborted freeze recovery. Please REBOOT system\n");
6740 return;
6741 }
6742 /* queue non-interrupt handler */
6743 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6744}
6745
6746/*
6747 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6748 * depending on the "freeze" parameter.
6749 *
6750 * No need to return an error if it times out, our only option
6751 * is to proceed anyway.
6752 */
6753static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6754{
6755 unsigned long timeout;
6756 u64 reg;
6757
6758 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6759 while (1) {
6760 reg = read_csr(dd, CCE_STATUS);
6761 if (freeze) {
6762 /* waiting until all indicators are set */
6763 if ((reg & ALL_FROZE) == ALL_FROZE)
6764 return; /* all done */
6765 } else {
6766 /* waiting until all indicators are clear */
6767 if ((reg & ALL_FROZE) == 0)
6768 return; /* all done */
6769 }
6770
6771 if (time_after(jiffies, timeout)) {
6772 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006773 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6774 freeze ? "" : "un", reg & ALL_FROZE,
6775 freeze ? ALL_FROZE : 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006776 return;
6777 }
6778 usleep_range(80, 120);
6779 }
6780}
6781
6782/*
6783 * Do all freeze handling for the RXE block.
6784 */
6785static void rxe_freeze(struct hfi1_devdata *dd)
6786{
6787 int i;
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07006788 struct hfi1_ctxtdata *rcd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006789
6790 /* disable port */
6791 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6792
6793 /* disable all receive contexts */
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07006794 for (i = 0; i < dd->num_rcv_contexts; i++) {
6795 rcd = hfi1_rcd_get_by_index(dd, i);
6796 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6797 hfi1_rcd_put(rcd);
6798 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006799}
6800
6801/*
6802 * Unfreeze handling for the RXE block - kernel contexts only.
6803 * This will also enable the port. User contexts will do unfreeze
6804 * handling on a per-context basis as they call into the driver.
6805 *
6806 */
6807static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6808{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006809 u32 rcvmask;
Michael J. Ruhle6f76222017-07-24 07:45:55 -07006810 u16 i;
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07006811 struct hfi1_ctxtdata *rcd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006812
6813 /* enable all kernel contexts */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07006814 for (i = 0; i < dd->num_rcv_contexts; i++) {
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07006815 rcd = hfi1_rcd_get_by_index(dd, i);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07006816
6817 /* Ensure all non-user contexts (including vnic) are enabled */
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07006818 if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
6819 hfi1_rcd_put(rcd);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07006820 continue;
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07006821 }
Mitko Haralanov566c1572016-02-03 14:32:49 -08006822 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6823 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
Michael J. Ruhl22505632017-07-24 07:46:06 -07006824 rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
Mitko Haralanov566c1572016-02-03 14:32:49 -08006825 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
Michael J. Ruhl22505632017-07-24 07:46:06 -07006826 hfi1_rcvctrl(dd, rcvmask, rcd);
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07006827 hfi1_rcd_put(rcd);
Mitko Haralanov566c1572016-02-03 14:32:49 -08006828 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006829
6830 /* enable port */
6831 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6832}
6833
6834/*
6835 * Non-interrupt SPC freeze handling.
6836 *
6837 * This is a work-queue function outside of the triggering interrupt.
6838 */
6839void handle_freeze(struct work_struct *work)
6840{
6841 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6842 freeze_work);
6843 struct hfi1_devdata *dd = ppd->dd;
6844
6845 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006846 wait_for_freeze_status(dd, 1);
6847
6848 /* SPC is now frozen */
6849
6850 /* do send PIO freeze steps */
6851 pio_freeze(dd);
6852
6853 /* do send DMA freeze steps */
6854 sdma_freeze(dd);
6855
6856 /* do send egress freeze steps - nothing to do */
6857
6858 /* do receive freeze steps */
6859 rxe_freeze(dd);
6860
6861 /*
6862 * Unfreeze the hardware - clear the freeze, wait for each
6863 * block's frozen bit to clear, then clear the frozen flag.
6864 */
6865 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6866 wait_for_freeze_status(dd, 0);
6867
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006868 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006869 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6870 wait_for_freeze_status(dd, 1);
6871 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6872 wait_for_freeze_status(dd, 0);
6873 }
6874
6875 /* do send PIO unfreeze steps for kernel contexts */
6876 pio_kernel_unfreeze(dd);
6877
6878 /* do send DMA unfreeze steps */
6879 sdma_unfreeze(dd);
6880
6881 /* do send egress unfreeze steps - nothing to do */
6882
6883 /* do receive unfreeze steps for kernel contexts */
6884 rxe_kernel_unfreeze(dd);
6885
6886 /*
6887 * The unfreeze procedure touches global device registers when
6888 * it disables and re-enables RXE. Mark the device unfrozen
6889 * after all that is done so other parts of the driver waiting
6890 * for the device to unfreeze don't do things out of order.
6891 *
6892 * The above implies that the meaning of HFI1_FROZEN flag is
6893 * "Device has gone into freeze mode and freeze mode handling
6894 * is still in progress."
6895 *
6896 * The flag will be removed when freeze mode processing has
6897 * completed.
6898 */
6899 dd->flags &= ~HFI1_FROZEN;
6900 wake_up(&dd->event_queue);
6901
6902 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006903}
6904
6905/*
6906 * Handle a link up interrupt from the 8051.
6907 *
6908 * This is a work-queue function outside of the interrupt.
6909 */
6910void handle_link_up(struct work_struct *work)
6911{
6912 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Jubin John17fb4f22016-02-14 20:21:52 -08006913 link_up_work);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006914 struct hfi1_devdata *dd = ppd->dd;
6915
Mike Marciniszyn77241052015-07-30 15:17:43 -04006916 set_link_state(ppd, HLS_UP_INIT);
6917
6918 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006919 read_ltp_rtt(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006920 /*
6921 * OPA specifies that certain counters are cleared on a transition
6922 * to link up, so do that.
6923 */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006924 clear_linkup_counters(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006925 /*
6926 * And (re)set link up default values.
6927 */
6928 set_linkup_defaults(ppd);
6929
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006930 /*
6931 * Set VL15 credits. Use cached value from verify cap interrupt.
6932 * In case of quick linkup or simulator, vl15 value will be set by
6933 * handle_linkup_change. VerifyCap interrupt handler will not be
6934 * called in those scenarios.
6935 */
6936 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6937 set_up_vl15(dd, dd->vl15buf_cached);
6938
Mike Marciniszyn77241052015-07-30 15:17:43 -04006939 /* enforce link speed enabled */
6940 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6941 /* oops - current speed is not enabled, bounce */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006942 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006943 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6944 ppd->link_speed_active, ppd->link_speed_enabled);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006945 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08006946 OPA_LINKDOWN_REASON_SPEED_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006947 set_link_state(ppd, HLS_DN_OFFLINE);
6948 start_link(ppd);
6949 }
6950}
6951
Jubin John4d114fd2016-02-14 20:21:43 -08006952/*
6953 * Several pieces of LNI information were cached for SMA in ppd.
6954 * Reset these on link down
6955 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006956static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6957{
6958 ppd->neighbor_guid = 0;
6959 ppd->neighbor_port_number = 0;
6960 ppd->neighbor_type = 0;
6961 ppd->neighbor_fm_security = 0;
6962}
6963
Dean Luickfeb831d2016-04-14 08:31:36 -07006964static const char * const link_down_reason_strs[] = {
6965 [OPA_LINKDOWN_REASON_NONE] = "None",
Dennis Dalessandro67838e62017-05-29 17:18:46 -07006966 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
Dean Luickfeb831d2016-04-14 08:31:36 -07006967 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6968 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6969 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6970 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6971 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6972 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6973 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6974 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6975 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6976 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6977 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6978 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6979 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6980 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6981 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6982 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6983 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6984 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6985 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6986 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6987 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6988 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6989 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6990 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6991 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6992 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6993 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6994 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6995 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6996 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6997 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6998 "Excessive buffer overrun",
6999 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7000 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7001 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7002 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7003 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7004 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7005 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7006 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7007 "Local media not installed",
7008 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7009 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7010 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7011 "End to end not installed",
7012 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7013 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7014 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7015 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7016 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7017 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7018};
7019
7020/* return the neighbor link down reason string */
7021static const char *link_down_reason_str(u8 reason)
7022{
7023 const char *str = NULL;
7024
7025 if (reason < ARRAY_SIZE(link_down_reason_strs))
7026 str = link_down_reason_strs[reason];
7027 if (!str)
7028 str = "(invalid)";
7029
7030 return str;
7031}
7032
Mike Marciniszyn77241052015-07-30 15:17:43 -04007033/*
7034 * Handle a link down interrupt from the 8051.
7035 *
7036 * This is a work-queue function outside of the interrupt.
7037 */
7038void handle_link_down(struct work_struct *work)
7039{
7040 u8 lcl_reason, neigh_reason = 0;
Dean Luickfeb831d2016-04-14 08:31:36 -07007041 u8 link_down_reason;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007042 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Dean Luickfeb831d2016-04-14 08:31:36 -07007043 link_down_work);
7044 int was_up;
7045 static const char ldr_str[] = "Link down reason: ";
Mike Marciniszyn77241052015-07-30 15:17:43 -04007046
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007047 if ((ppd->host_link_state &
7048 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7049 ppd->port_type == PORT_TYPE_FIXED)
7050 ppd->offline_disabled_reason =
7051 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7052
7053 /* Go offline first, then deal with reading/writing through 8051 */
Dean Luickfeb831d2016-04-14 08:31:36 -07007054 was_up = !!(ppd->host_link_state & HLS_UP);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007055 set_link_state(ppd, HLS_DN_OFFLINE);
Sebastian Sanchez626c0772017-07-29 08:43:55 -07007056 xchg(&ppd->is_link_down_queued, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007057
Dean Luickfeb831d2016-04-14 08:31:36 -07007058 if (was_up) {
7059 lcl_reason = 0;
7060 /* link down reason is only valid if the link was up */
7061 read_link_down_reason(ppd->dd, &link_down_reason);
7062 switch (link_down_reason) {
7063 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7064 /* the link went down, no idle message reason */
7065 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7066 ldr_str);
7067 break;
7068 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7069 /*
7070 * The neighbor reason is only valid if an idle message
7071 * was received for it.
7072 */
7073 read_planned_down_reason_code(ppd->dd, &neigh_reason);
7074 dd_dev_info(ppd->dd,
7075 "%sNeighbor link down message %d, %s\n",
7076 ldr_str, neigh_reason,
7077 link_down_reason_str(neigh_reason));
7078 break;
7079 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7080 dd_dev_info(ppd->dd,
7081 "%sHost requested link to go offline\n",
7082 ldr_str);
7083 break;
7084 default:
7085 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7086 ldr_str, link_down_reason);
7087 break;
7088 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007089
Dean Luickfeb831d2016-04-14 08:31:36 -07007090 /*
7091 * If no reason, assume peer-initiated but missed
7092 * LinkGoingDown idle flits.
7093 */
7094 if (neigh_reason == 0)
7095 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7096 } else {
7097 /* went down while polling or going up */
7098 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7099 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007100
7101 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7102
Dean Luick015e91f2016-04-14 08:31:42 -07007103 /* inform the SMA when the link transitions from up to down */
7104 if (was_up && ppd->local_link_down_reason.sma == 0 &&
7105 ppd->neigh_link_down_reason.sma == 0) {
7106 ppd->local_link_down_reason.sma =
7107 ppd->local_link_down_reason.latest;
7108 ppd->neigh_link_down_reason.sma =
7109 ppd->neigh_link_down_reason.latest;
7110 }
7111
Mike Marciniszyn77241052015-07-30 15:17:43 -04007112 reset_neighbor_info(ppd);
7113
7114 /* disable the port */
7115 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7116
Jubin John4d114fd2016-02-14 20:21:43 -08007117 /*
7118 * If there is no cable attached, turn the DC off. Otherwise,
7119 * start the link bring up.
7120 */
Dean Luick0db9dec2016-09-06 04:35:20 -07007121 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04007122 dc_shutdown(ppd->dd);
Dean Luick0db9dec2016-09-06 04:35:20 -07007123 else
Mike Marciniszyn77241052015-07-30 15:17:43 -04007124 start_link(ppd);
7125}
7126
7127void handle_link_bounce(struct work_struct *work)
7128{
7129 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7130 link_bounce_work);
7131
7132 /*
7133 * Only do something if the link is currently up.
7134 */
7135 if (ppd->host_link_state & HLS_UP) {
7136 set_link_state(ppd, HLS_DN_OFFLINE);
7137 start_link(ppd);
7138 } else {
7139 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007140 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007141 }
7142}
7143
7144/*
7145 * Mask conversion: Capability exchange to Port LTP. The capability
7146 * exchange has an implicit 16b CRC that is mandatory.
7147 */
7148static int cap_to_port_ltp(int cap)
7149{
7150 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7151
7152 if (cap & CAP_CRC_14B)
7153 port_ltp |= PORT_LTP_CRC_MODE_14;
7154 if (cap & CAP_CRC_48B)
7155 port_ltp |= PORT_LTP_CRC_MODE_48;
7156 if (cap & CAP_CRC_12B_16B_PER_LANE)
7157 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7158
7159 return port_ltp;
7160}
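/*
 * Example, following directly from the code above: a capability mask of
 * only CAP_CRC_14B yields PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14,
 * since the mandatory 16b CRC mode is always included.
 */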
7161
7162/*
7163 * Convert an OPA Port LTP mask to capability mask
7164 */
7165int port_ltp_to_cap(int port_ltp)
7166{
7167 int cap_mask = 0;
7168
7169 if (port_ltp & PORT_LTP_CRC_MODE_14)
7170 cap_mask |= CAP_CRC_14B;
7171 if (port_ltp & PORT_LTP_CRC_MODE_48)
7172 cap_mask |= CAP_CRC_48B;
7173 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7174 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7175
7176 return cap_mask;
7177}
7178
7179/*
7180 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7181 */
7182static int lcb_to_port_ltp(int lcb_crc)
7183{
7184 int port_ltp = 0;
7185
7186 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7187 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7188 else if (lcb_crc == LCB_CRC_48B)
7189 port_ltp = PORT_LTP_CRC_MODE_48;
7190 else if (lcb_crc == LCB_CRC_14B)
7191 port_ltp = PORT_LTP_CRC_MODE_14;
7192 else
7193 port_ltp = PORT_LTP_CRC_MODE_16;
7194
7195 return port_ltp;
7196}
7197
7198/*
7199 * Our neighbor has indicated that we are allowed to act as a fabric
7200 * manager, so place the full management partition key in the second
7201 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7202 * that we should already have the limited management partition key in
7203 * array element 1, and also that the port is not yet up when
7204 * add_full_mgmt_pkey() is invoked.
7205 */
7206static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7207{
7208 struct hfi1_devdata *dd = ppd->dd;
7209
Dennis Dalessandroa498fbc2017-04-09 10:17:06 -07007210 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
Dean Luick87645222015-12-01 15:38:21 -05007211 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7212 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7213 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007214 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7215 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007216 hfi1_event_pkey_change(ppd->dd, ppd->port);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007217}
7218
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007219static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007220{
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007221 if (ppd->pkeys[2] != 0) {
7222 ppd->pkeys[2] = 0;
7223 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007224 hfi1_event_pkey_change(ppd->dd, ppd->port);
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007225 }
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007226}
7227
Mike Marciniszyn77241052015-07-30 15:17:43 -04007228/*
7229 * Convert the given link width to the OPA link width bitmask.
7230 */
7231static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7232{
7233 switch (width) {
7234 case 0:
7235 /*
7236 * Simulator and quick linkup do not set the width.
7237 * Just set it to 4x without complaint.
7238 */
7239 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7240 return OPA_LINK_WIDTH_4X;
7241 return 0; /* no lanes up */
7242 case 1: return OPA_LINK_WIDTH_1X;
7243 case 2: return OPA_LINK_WIDTH_2X;
7244 case 3: return OPA_LINK_WIDTH_3X;
7245 default:
7246 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007247 __func__, width);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007248 /* fall through */
7249 case 4: return OPA_LINK_WIDTH_4X;
7250 }
7251}
7252
7253/*
7254 * Do a population count on the bottom nibble.
7255 */
7256static const u8 bit_counts[16] = {
7257 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7258};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007259
Mike Marciniszyn77241052015-07-30 15:17:43 -04007260static inline u8 nibble_to_count(u8 nibble)
7261{
7262 return bit_counts[nibble & 0xf];
7263}
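/*
 * For instance, nibble_to_count(0xb) is 3: 0xb = 0b1011 has three bits
 * set, i.e. three active lanes in the enable mask.
 */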
7264
7265/*
7266 * Read the active lane information from the 8051 registers and return
7267 * their widths.
7268 *
7269 * Active lane information is found in these 8051 registers:
7270 * enable_lane_tx
7271 * enable_lane_rx
7272 */
7273static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7274 u16 *rx_width)
7275{
7276 u16 tx, rx;
7277 u8 enable_lane_rx;
7278 u8 enable_lane_tx;
7279 u8 tx_polarity_inversion;
7280 u8 rx_polarity_inversion;
7281 u8 max_rate;
7282
7283 /* read the active lanes */
7284 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08007285 &rx_polarity_inversion, &max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007286 read_local_lni(dd, &enable_lane_rx);
7287
7288 /* convert to counts */
7289 tx = nibble_to_count(enable_lane_tx);
7290 rx = nibble_to_count(enable_lane_rx);
7291
7292 /*
7293 * Set link_speed_active here, overriding what was set in
7294 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7295 * set the max_rate field in handle_verify_cap until v0.19.
7296 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007297 if ((dd->icode == ICODE_RTL_SILICON) &&
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07007298 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007299 /* max_rate: 0 = 12.5G, 1 = 25G */
7300 switch (max_rate) {
7301 case 0:
7302 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7303 break;
7304 default:
7305 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007306 "%s: unexpected max rate %d, using 25Gb\n",
7307 __func__, (int)max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007308 /* fall through */
7309 case 1:
7310 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7311 break;
7312 }
7313 }
7314
7315 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007316 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7317 enable_lane_tx, tx, enable_lane_rx, rx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007318 *tx_width = link_width_to_bits(dd, tx);
7319 *rx_width = link_width_to_bits(dd, rx);
7320}
7321
7322/*
7323 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7324 * Valid after the end of VerifyCap and during LinkUp. Does not change
7325 * after link up. I.e. look elsewhere for downgrade information.
7326 *
7327 * Bits are:
7328 * + bits [7:4] contain the number of active transmitters
7329 * + bits [3:0] contain the number of active receivers
7330 * These are numbers 1 through 4 and can be different values if the
7331 * link is asymmetric.
7332 *
7333 * verify_cap_local_fm_link_width[0] retains its original value.
7334 */
7335static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7336 u16 *rx_width)
7337{
7338 u16 widths, tx, rx;
7339 u8 misc_bits, local_flags;
7340 u16 active_tx, active_rx;
7341
7342 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7343 tx = widths >> 12;
7344 rx = (widths >> 8) & 0xf;
7345
7346 *tx_width = link_width_to_bits(dd, tx);
7347 *rx_width = link_width_to_bits(dd, rx);
7348
7349 /* print the active widths */
7350 get_link_widths(dd, &active_tx, &active_rx);
7351}
7352
7353/*
7354 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7355 * hardware information when the link first comes up.
7356 *
7357 * The link width is not available until after VerifyCap.AllFramesReceived
7358 * (the trigger for handle_verify_cap), so this is outside that routine
7359 * and should be called when the 8051 signals linkup.
7360 */
7361void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7362{
7363 u16 tx_width, rx_width;
7364
7365 /* get end-of-LNI link widths */
7366 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7367
7368 /* use tx_width as the link is supposed to be symmetric on link up */
7369 ppd->link_width_active = tx_width;
7370 /* link width downgrade active (LWD.A) starts out matching LW.A */
7371 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7372 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7373 /* per OPA spec, on link up LWD.E resets to LWD.S */
7374 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7375 /* cache the active egress rate (units [10^6 bits/sec]) */
7376 ppd->current_egress_rate = active_egress_rate(ppd);
7377}
7378
7379/*
7380 * Handle a verify capabilities interrupt from the 8051.
7381 *
7382 * This is a work-queue function outside of the interrupt.
7383 */
7384void handle_verify_cap(struct work_struct *work)
7385{
7386 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7387 link_vc_work);
7388 struct hfi1_devdata *dd = ppd->dd;
7389 u64 reg;
7390 u8 power_management;
Colin Ian Kinga63aa5d2017-07-13 23:13:38 +01007391 u8 continuous;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007392 u8 vcu;
7393 u8 vau;
7394 u8 z;
7395 u16 vl15buf;
7396 u16 link_widths;
7397 u16 crc_mask;
7398 u16 crc_val;
7399 u16 device_id;
7400 u16 active_tx, active_rx;
7401 u8 partner_supported_crc;
7402 u8 remote_tx_rate;
7403 u8 device_rev;
7404
7405 set_link_state(ppd, HLS_VERIFY_CAP);
7406
7407 lcb_shutdown(dd, 0);
7408 adjust_lcb_for_fpga_serdes(dd);
7409
Colin Ian Kinga63aa5d2017-07-13 23:13:38 +01007410 read_vc_remote_phy(dd, &power_management, &continuous);
Jubin John17fb4f22016-02-14 20:21:52 -08007411 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7412 &partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007413 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7414 read_remote_device_id(dd, &device_id, &device_rev);
7415 /*
7416 * And the 'MgmtAllowed' information, which is exchanged during
 7417	 * LNI, is also available at this point.
7418 */
7419 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7420 /* print the active widths */
7421 get_link_widths(dd, &active_tx, &active_rx);
7422 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007423 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
Colin Ian Kinga63aa5d2017-07-13 23:13:38 +01007424 (int)power_management, (int)continuous);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007425 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007426 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7427 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7428 (int)partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007429 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007430 (u32)remote_tx_rate, (u32)link_widths);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007431 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007432 (u32)device_id, (u32)device_rev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007433 /*
7434 * The peer vAU value just read is the peer receiver value. HFI does
7435 * not support a transmit vAU of 0 (AU == 8). We advertised that
7436 * with Z=1 in the fabric capabilities sent to the peer. The peer
7437 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7438 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7439 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7440 * subject to the Z value exception.
7441 */
7442 if (vau == 0)
7443 vau = 1;
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07007444 set_up_vau(dd, vau);
7445
7446 /*
7447 * Set VL15 credits to 0 in global credit register. Cache remote VL15
 7448	 * credit value and wait for the link-up interrupt to set it.
7449 */
7450 set_up_vl15(dd, 0);
7451 dd->vl15buf_cached = vl15buf;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007452
7453 /* set up the LCB CRC mode */
7454 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7455
7456 /* order is important: use the lowest bit in common */
7457 if (crc_mask & CAP_CRC_14B)
7458 crc_val = LCB_CRC_14B;
7459 else if (crc_mask & CAP_CRC_48B)
7460 crc_val = LCB_CRC_48B;
7461 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7462 crc_val = LCB_CRC_12B_16B_PER_LANE;
7463 else
7464 crc_val = LCB_CRC_16B;
7465
7466 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7467 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7468 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7469
7470 /* set (14b only) or clear sideband credit */
7471 reg = read_csr(dd, SEND_CM_CTRL);
7472 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7473 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007474 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007475 } else {
7476 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007477 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007478 }
7479
7480 ppd->link_speed_active = 0; /* invalid value */
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07007481 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007482 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7483 switch (remote_tx_rate) {
7484 case 0:
7485 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7486 break;
7487 case 1:
7488 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7489 break;
7490 }
7491 } else {
7492 /* actual rate is highest bit of the ANDed rates */
7493 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7494
7495 if (rate & 2)
7496 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7497 else if (rate & 1)
7498 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7499 }
7500 if (ppd->link_speed_active == 0) {
7501 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007502 __func__, (int)remote_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007503 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7504 }
7505
7506 /*
7507 * Cache the values of the supported, enabled, and active
7508 * LTP CRC modes to return in 'portinfo' queries. But the bit
7509 * flags that are returned in the portinfo query differ from
7510 * what's in the link_crc_mask, crc_sizes, and crc_val
7511 * variables. Convert these here.
7512 */
7513 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7514 /* supported crc modes */
7515 ppd->port_ltp_crc_mode |=
7516 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7517 /* enabled crc modes */
7518 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7519 /* active crc mode */
7520
7521 /* set up the remote credit return table */
7522 assign_remote_cm_au_table(dd, vcu);
7523
7524 /*
7525 * The LCB is reset on entry to handle_verify_cap(), so this must
7526 * be applied on every link up.
7527 *
7528 * Adjust LCB error kill enable to kill the link if
7529 * these RBUF errors are seen:
7530 * REPLAY_BUF_MBE_SMASK
7531 * FLIT_INPUT_BUF_MBE_SMASK
7532 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007533 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007534 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7535 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7536 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7537 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7538 }
7539
7540 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7541 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7542
7543 /* give 8051 access to the LCB CSRs */
7544 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7545 set_8051_lcb_access(dd);
7546
Mike Marciniszyn77241052015-07-30 15:17:43 -04007547 if (ppd->mgmt_allowed)
7548 add_full_mgmt_pkey(ppd);
7549
7550 /* tell the 8051 to go to LinkUp */
7551 set_link_state(ppd, HLS_GOING_UP);
7552}
7553
7554/*
7555 * Apply the link width downgrade enabled policy against the current active
7556 * link widths.
7557 *
7558 * Called when the enabled policy changes or the active link widths change.
7559 */
7560void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7561{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007562 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007563 int tries;
7564 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007565 u16 tx, rx;
7566
Dean Luick323fd782015-11-16 21:59:24 -05007567 /* use the hls lock to avoid a race with actual link up */
7568 tries = 0;
7569retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007570 mutex_lock(&ppd->hls_lock);
7571 /* only apply if the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07007572 if (ppd->host_link_state & HLS_DOWN) {
Dean Luick323fd782015-11-16 21:59:24 -05007573		/* still going up, wait and retry */
7574 if (ppd->host_link_state & HLS_GOING_UP) {
7575 if (++tries < 1000) {
7576 mutex_unlock(&ppd->hls_lock);
7577 usleep_range(100, 120); /* arbitrary */
7578 goto retry;
7579 }
7580 dd_dev_err(ppd->dd,
7581 "%s: giving up waiting for link state change\n",
7582 __func__);
7583 }
7584 goto done;
7585 }
7586
7587 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007588
7589 if (refresh_widths) {
7590 get_link_widths(ppd->dd, &tx, &rx);
7591 ppd->link_width_downgrade_tx_active = tx;
7592 ppd->link_width_downgrade_rx_active = rx;
7593 }
7594
Dean Luickf9b56352016-04-14 08:31:30 -07007595 if (ppd->link_width_downgrade_tx_active == 0 ||
7596 ppd->link_width_downgrade_rx_active == 0) {
7597 /* the 8051 reported a dead link as a downgrade */
7598 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7599 } else if (lwde == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007600 /* downgrade is disabled */
7601
7602 /* bounce if not at starting active width */
7603 if ((ppd->link_width_active !=
Jubin John17fb4f22016-02-14 20:21:52 -08007604 ppd->link_width_downgrade_tx_active) ||
7605 (ppd->link_width_active !=
7606 ppd->link_width_downgrade_rx_active)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007607 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007608 "Link downgrade is disabled and link has downgraded, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007609 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007610 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7611 ppd->link_width_active,
7612 ppd->link_width_downgrade_tx_active,
7613 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007614 do_bounce = 1;
7615 }
Jubin Johnd0d236e2016-02-14 20:20:15 -08007616 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7617 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007618 /* Tx or Rx is outside the enabled policy */
7619 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007620 "Link is outside of downgrade allowed, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007621 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007622 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7623 lwde, ppd->link_width_downgrade_tx_active,
7624 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007625 do_bounce = 1;
7626 }
7627
Dean Luick323fd782015-11-16 21:59:24 -05007628done:
7629 mutex_unlock(&ppd->hls_lock);
7630
Mike Marciniszyn77241052015-07-30 15:17:43 -04007631 if (do_bounce) {
7632 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08007633 OPA_LINKDOWN_REASON_WIDTH_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007634 set_link_state(ppd, HLS_DN_OFFLINE);
7635 start_link(ppd);
7636 }
7637}
7638
7639/*
7640 * Handle a link downgrade interrupt from the 8051.
7641 *
7642 * This is a work-queue function outside of the interrupt.
7643 */
7644void handle_link_downgrade(struct work_struct *work)
7645{
7646 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7647 link_downgrade_work);
7648
7649 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7650 apply_link_downgrade_policy(ppd, 1);
7651}
7652
7653static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7654{
7655 return flag_string(buf, buf_len, flags, dcc_err_flags,
7656 ARRAY_SIZE(dcc_err_flags));
7657}
7658
7659static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7660{
7661 return flag_string(buf, buf_len, flags, lcb_err_flags,
7662 ARRAY_SIZE(lcb_err_flags));
7663}
7664
7665static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7666{
7667 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7668 ARRAY_SIZE(dc8051_err_flags));
7669}
7670
7671static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7672{
7673 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7674 ARRAY_SIZE(dc8051_info_err_flags));
7675}
7676
7677static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7678{
7679 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7680 ARRAY_SIZE(dc8051_info_host_msg_flags));
7681}
7682
7683static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7684{
7685 struct hfi1_pportdata *ppd = dd->pport;
7686 u64 info, err, host_msg;
7687 int queue_link_down = 0;
7688 char buf[96];
7689
7690 /* look at the flags */
7691 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7692 /* 8051 information set by firmware */
7693 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7694 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7695 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7696 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7697 host_msg = (info >>
7698 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7699 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7700
7701 /*
7702 * Handle error flags.
7703 */
7704 if (err & FAILED_LNI) {
7705 /*
7706 * LNI error indications are cleared by the 8051
7707 * only when starting polling. Only pay attention
7708 * to them when in the states that occur during
7709 * LNI.
7710 */
7711 if (ppd->host_link_state
7712 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7713 queue_link_down = 1;
7714 dd_dev_info(dd, "Link error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007715 dc8051_info_err_string(buf,
7716 sizeof(buf),
7717 err &
7718 FAILED_LNI));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007719 }
7720 err &= ~(u64)FAILED_LNI;
7721 }
Dean Luick6d014532015-12-01 15:38:23 -05007722		/* unknown frames can happen during LNI, just count */
7723 if (err & UNKNOWN_FRAME) {
7724 ppd->unknown_frame_count++;
7725 err &= ~(u64)UNKNOWN_FRAME;
7726 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007727 if (err) {
7728 /* report remaining errors, but do not do anything */
7729 dd_dev_err(dd, "8051 info error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007730 dc8051_info_err_string(buf, sizeof(buf),
7731 err));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007732 }
7733
7734 /*
7735 * Handle host message flags.
7736 */
7737 if (host_msg & HOST_REQ_DONE) {
7738 /*
7739 * Presently, the driver does a busy wait for
7740 * host requests to complete. This is only an
7741 * informational message.
7742 * NOTE: The 8051 clears the host message
7743 * information *on the next 8051 command*.
7744 * Therefore, when linkup is achieved,
7745 * this flag will still be set.
7746 */
7747 host_msg &= ~(u64)HOST_REQ_DONE;
7748 }
7749 if (host_msg & BC_SMA_MSG) {
Sebastian Sanchez71d47002017-07-29 08:43:49 -07007750 queue_work(ppd->link_wq, &ppd->sma_message_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007751 host_msg &= ~(u64)BC_SMA_MSG;
7752 }
7753 if (host_msg & LINKUP_ACHIEVED) {
7754 dd_dev_info(dd, "8051: Link up\n");
Sebastian Sanchez71d47002017-07-29 08:43:49 -07007755 queue_work(ppd->link_wq, &ppd->link_up_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007756 host_msg &= ~(u64)LINKUP_ACHIEVED;
7757 }
7758 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07007759 handle_8051_request(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007760 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7761 }
7762 if (host_msg & VERIFY_CAP_FRAME) {
Sebastian Sanchez71d47002017-07-29 08:43:49 -07007763 queue_work(ppd->link_wq, &ppd->link_vc_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007764 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7765 }
7766 if (host_msg & LINK_GOING_DOWN) {
7767 const char *extra = "";
7768 /* no downgrade action needed if going down */
7769 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7770 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7771 extra = " (ignoring downgrade)";
7772 }
7773 dd_dev_info(dd, "8051: Link down%s\n", extra);
7774 queue_link_down = 1;
7775 host_msg &= ~(u64)LINK_GOING_DOWN;
7776 }
7777 if (host_msg & LINK_WIDTH_DOWNGRADED) {
Sebastian Sanchez71d47002017-07-29 08:43:49 -07007778 queue_work(ppd->link_wq, &ppd->link_downgrade_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007779 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7780 }
7781 if (host_msg) {
7782 /* report remaining messages, but do not do anything */
7783 dd_dev_info(dd, "8051 info host message: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007784 dc8051_info_host_msg_string(buf,
7785 sizeof(buf),
7786 host_msg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007787 }
7788
7789 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7790 }
7791 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7792 /*
7793 * Lost the 8051 heartbeat. If this happens, we
7794 * receive constant interrupts about it. Disable
7795 * the interrupt after the first.
7796 */
7797 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7798 write_csr(dd, DC_DC8051_ERR_EN,
Jubin John17fb4f22016-02-14 20:21:52 -08007799 read_csr(dd, DC_DC8051_ERR_EN) &
7800 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007801
7802 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7803 }
7804 if (reg) {
7805 /* report the error, but do not do anything */
7806 dd_dev_err(dd, "8051 error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007807 dc8051_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007808 }
7809
7810 if (queue_link_down) {
Jubin John4d114fd2016-02-14 20:21:43 -08007811 /*
7812 * if the link is already going down or disabled, do not
7813 * queue another
7814 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007815 if ((ppd->host_link_state &
7816 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
Sebastian Sanchez626c0772017-07-29 08:43:55 -07007817 ppd->link_enabled == 0 || ppd->is_link_down_queued) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007818 dd_dev_info(dd, "%s: not queuing link down\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007819 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007820 } else {
Sebastian Sanchez626c0772017-07-29 08:43:55 -07007821 xchg(&ppd->is_link_down_queued, 1);
Sebastian Sanchez71d47002017-07-29 08:43:49 -07007822 queue_work(ppd->link_wq, &ppd->link_down_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007823 }
7824 }
7825}
7826
7827static const char * const fm_config_txt[] = {
7828[0] =
7829 "BadHeadDist: Distance violation between two head flits",
7830[1] =
7831 "BadTailDist: Distance violation between two tail flits",
7832[2] =
7833 "BadCtrlDist: Distance violation between two credit control flits",
7834[3] =
7835 "BadCrdAck: Credits return for unsupported VL",
7836[4] =
7837 "UnsupportedVLMarker: Received VL Marker",
7838[5] =
7839 "BadPreempt: Exceeded the preemption nesting level",
7840[6] =
7841 "BadControlFlit: Received unsupported control flit",
7842/* no 7 */
7843[8] =
7844 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7845};
7846
7847static const char * const port_rcv_txt[] = {
7848[1] =
7849 "BadPktLen: Illegal PktLen",
7850[2] =
7851 "PktLenTooLong: Packet longer than PktLen",
7852[3] =
7853 "PktLenTooShort: Packet shorter than PktLen",
7854[4] =
7855 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7856[5] =
7857 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7858[6] =
7859 "BadL2: Illegal L2 opcode",
7860[7] =
7861 "BadSC: Unsupported SC",
7862[9] =
7863 "BadRC: Illegal RC",
7864[11] =
7865 "PreemptError: Preempting with same VL",
7866[12] =
7867 "PreemptVL15: Preempting a VL15 packet",
7868};
7869
7870#define OPA_LDR_FMCONFIG_OFFSET 16
7871#define OPA_LDR_PORTRCV_OFFSET 0
7872static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7873{
7874 u64 info, hdr0, hdr1;
7875 const char *extra;
7876 char buf[96];
7877 struct hfi1_pportdata *ppd = dd->pport;
7878 u8 lcl_reason = 0;
7879 int do_bounce = 0;
7880
7881 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7882 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7883 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7884 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7885 /* set status bit */
7886 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7887 }
7888 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7889 }
7890
7891 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7892 struct hfi1_pportdata *ppd = dd->pport;
7893 /* this counter saturates at (2^32) - 1 */
7894 if (ppd->link_downed < (u32)UINT_MAX)
7895 ppd->link_downed++;
7896 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7897 }
7898
7899 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7900 u8 reason_valid = 1;
7901
7902 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7903 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7904 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7905 /* set status bit */
7906 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7907 }
7908 switch (info) {
7909 case 0:
7910 case 1:
7911 case 2:
7912 case 3:
7913 case 4:
7914 case 5:
7915 case 6:
7916 extra = fm_config_txt[info];
7917 break;
7918 case 8:
7919 extra = fm_config_txt[info];
7920 if (ppd->port_error_action &
7921 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7922 do_bounce = 1;
7923 /*
7924 * lcl_reason cannot be derived from info
7925 * for this error
7926 */
7927 lcl_reason =
7928 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7929 }
7930 break;
7931 default:
7932 reason_valid = 0;
7933 snprintf(buf, sizeof(buf), "reserved%lld", info);
7934 extra = buf;
7935 break;
7936 }
7937
7938 if (reason_valid && !do_bounce) {
7939 do_bounce = ppd->port_error_action &
7940 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7941 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7942 }
7943
7944 /* just report this */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007945 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7946 extra);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007947 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7948 }
7949
7950 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7951 u8 reason_valid = 1;
7952
7953 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7954 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7955 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7956 if (!(dd->err_info_rcvport.status_and_code &
7957 OPA_EI_STATUS_SMASK)) {
7958 dd->err_info_rcvport.status_and_code =
7959 info & OPA_EI_CODE_SMASK;
7960 /* set status bit */
7961 dd->err_info_rcvport.status_and_code |=
7962 OPA_EI_STATUS_SMASK;
Jubin John4d114fd2016-02-14 20:21:43 -08007963 /*
7964 * save first 2 flits in the packet that caused
7965 * the error
7966 */
Bart Van Assche48a0cc132016-06-03 12:09:56 -07007967 dd->err_info_rcvport.packet_flit1 = hdr0;
7968 dd->err_info_rcvport.packet_flit2 = hdr1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007969 }
7970 switch (info) {
7971 case 1:
7972 case 2:
7973 case 3:
7974 case 4:
7975 case 5:
7976 case 6:
7977 case 7:
7978 case 9:
7979 case 11:
7980 case 12:
7981 extra = port_rcv_txt[info];
7982 break;
7983 default:
7984 reason_valid = 0;
7985 snprintf(buf, sizeof(buf), "reserved%lld", info);
7986 extra = buf;
7987 break;
7988 }
7989
7990 if (reason_valid && !do_bounce) {
7991 do_bounce = ppd->port_error_action &
7992 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7993 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7994 }
7995
7996 /* just report this */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007997 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
7998 " hdr0 0x%llx, hdr1 0x%llx\n",
7999 extra, hdr0, hdr1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008000
8001 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8002 }
8003
8004 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8005 /* informative only */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08008006 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008007 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8008 }
8009 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8010 /* informative only */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08008011 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008012 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8013 }
8014
Don Hiatt243d9f42017-03-20 17:26:20 -07008015 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8016 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8017
Mike Marciniszyn77241052015-07-30 15:17:43 -04008018 /* report any remaining errors */
8019 if (reg)
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08008020 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8021 dcc_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008022
8023 if (lcl_reason == 0)
8024 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8025
8026 if (do_bounce) {
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08008027 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8028 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008029 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
Sebastian Sanchez71d47002017-07-29 08:43:49 -07008030 queue_work(ppd->link_wq, &ppd->link_bounce_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008031 }
8032}
8033
8034static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8035{
8036 char buf[96];
8037
8038 dd_dev_info(dd, "LCB Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008039 lcb_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008040}
8041
8042/*
8043 * CCE block DC interrupt. Source is < 8.
8044 */
8045static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8046{
8047 const struct err_reg_info *eri = &dc_errs[source];
8048
8049 if (eri->handler) {
8050 interrupt_clear_down(dd, 0, eri);
8051 } else if (source == 3 /* dc_lbm_int */) {
8052 /*
8053 * This indicates that a parity error has occurred on the
8054 * address/control lines presented to the LBM. The error
8055 * is a single pulse, there is no associated error flag,
8056 * and it is non-maskable. This is because if a parity
 8057		 * error occurs on the request, the request is dropped.
8058 * This should never occur, but it is nice to know if it
8059 * ever does.
8060 */
8061 dd_dev_err(dd, "Parity error in DC LBM block\n");
8062 } else {
8063 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8064 }
8065}
8066
8067/*
8068 * TX block send credit interrupt. Source is < 160.
8069 */
8070static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8071{
8072 sc_group_release_update(dd, source);
8073}
8074
8075/*
8076 * TX block SDMA interrupt. Source is < 48.
8077 *
8078 * SDMA interrupts are grouped by type:
8079 *
8080 * 0 - N-1 = SDma
8081 * N - 2N-1 = SDmaProgress
8082 * 2N - 3N-1 = SDmaIdle
8083 */
8084static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8085{
8086 /* what interrupt */
8087 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8088 /* which engine */
8089 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8090
8091#ifdef CONFIG_SDMA_VERBOSITY
8092 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8093 slashstrip(__FILE__), __LINE__, __func__);
8094 sdma_dumpstate(&dd->per_sdma[which]);
8095#endif
8096
8097 if (likely(what < 3 && which < dd->num_sdma)) {
8098 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8099 } else {
8100 /* should not happen */
8101 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8102 }
8103}
8104
8105/*
8106 * RX block receive available interrupt. Source is < 160.
8107 */
8108static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8109{
8110 struct hfi1_ctxtdata *rcd;
8111 char *err_detail;
8112
8113 if (likely(source < dd->num_rcv_contexts)) {
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07008114 rcd = hfi1_rcd_get_by_index(dd, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008115 if (rcd) {
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07008116 /* Check for non-user contexts, including vnic */
8117 if ((source < dd->first_dyn_alloc_ctxt) ||
8118 (rcd->sc && (rcd->sc->type == SC_KERNEL)))
Dean Luickf4f30031c2015-10-26 10:28:44 -04008119 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008120 else
8121 handle_user_interrupt(rcd);
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07008122
8123 hfi1_rcd_put(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008124 return; /* OK */
8125 }
8126 /* received an interrupt, but no rcd */
8127 err_detail = "dataless";
8128 } else {
8129 /* received an interrupt, but are not using that context */
8130 err_detail = "out of range";
8131 }
8132 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008133 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008134}
8135
8136/*
8137 * RX block receive urgent interrupt. Source is < 160.
8138 */
8139static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8140{
8141 struct hfi1_ctxtdata *rcd;
8142 char *err_detail;
8143
8144 if (likely(source < dd->num_rcv_contexts)) {
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07008145 rcd = hfi1_rcd_get_by_index(dd, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008146 if (rcd) {
8147 /* only pay attention to user urgent interrupts */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07008148 if ((source >= dd->first_dyn_alloc_ctxt) &&
8149 (!rcd->sc || (rcd->sc->type == SC_USER)))
Mike Marciniszyn77241052015-07-30 15:17:43 -04008150 handle_user_interrupt(rcd);
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07008151
8152 hfi1_rcd_put(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008153 return; /* OK */
8154 }
8155 /* received an interrupt, but no rcd */
8156 err_detail = "dataless";
8157 } else {
8158 /* received an interrupt, but are not using that context */
8159 err_detail = "out of range";
8160 }
8161 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008162 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008163}
8164
8165/*
8166 * Reserved range interrupt. Should not be called in normal operation.
8167 */
8168static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8169{
8170 char name[64];
8171
8172 dd_dev_err(dd, "unexpected %s interrupt\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008173 is_reserved_name(name, sizeof(name), source));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008174}
8175
8176static const struct is_table is_table[] = {
Jubin John4d114fd2016-02-14 20:21:43 -08008177/*
8178 * start end
8179 * name func interrupt func
8180 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008181{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8182 is_misc_err_name, is_misc_err_int },
8183{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8184 is_sdma_eng_err_name, is_sdma_eng_err_int },
8185{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8186 is_sendctxt_err_name, is_sendctxt_err_int },
8187{ IS_SDMA_START, IS_SDMA_END,
8188 is_sdma_eng_name, is_sdma_eng_int },
8189{ IS_VARIOUS_START, IS_VARIOUS_END,
8190 is_various_name, is_various_int },
8191{ IS_DC_START, IS_DC_END,
8192 is_dc_name, is_dc_int },
8193{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8194 is_rcv_avail_name, is_rcv_avail_int },
8195{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8196 is_rcv_urgent_name, is_rcv_urgent_int },
8197{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8198 is_send_credit_name, is_send_credit_int},
8199{ IS_RESERVED_START, IS_RESERVED_END,
8200 is_reserved_name, is_reserved_int},
8201};
8202
8203/*
8204 * Interrupt source interrupt - called when the given source has an interrupt.
8205 * Source is a bit index into an array of 64-bit integers.
8206 */
8207static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8208{
8209 const struct is_table *entry;
8210
8211 /* avoids a double compare by walking the table in-order */
8212 for (entry = &is_table[0]; entry->is_name; entry++) {
8213 if (source < entry->end) {
8214 trace_hfi1_interrupt(dd, entry, source);
8215 entry->is_int(dd, source - entry->start);
8216 return;
8217 }
8218 }
8219 /* fell off the end */
8220 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8221}
8222
8223/*
8224 * General interrupt handler. This is able to correctly handle
8225 * all interrupts in case INTx is used.
8226 */
8227static irqreturn_t general_interrupt(int irq, void *data)
8228{
8229 struct hfi1_devdata *dd = data;
8230 u64 regs[CCE_NUM_INT_CSRS];
8231 u32 bit;
8232 int i;
8233
8234 this_cpu_inc(*dd->int_counter);
8235
8236 /* phase 1: scan and clear all handled interrupts */
8237 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8238 if (dd->gi_mask[i] == 0) {
8239 regs[i] = 0; /* used later */
8240 continue;
8241 }
8242 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8243 dd->gi_mask[i];
8244 /* only clear if anything is set */
8245 if (regs[i])
8246 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8247 }
8248
8249 /* phase 2: call the appropriate handler */
8250 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John17fb4f22016-02-14 20:21:52 -08008251 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008252 is_interrupt(dd, bit);
8253 }
8254
8255 return IRQ_HANDLED;
8256}
8257
8258static irqreturn_t sdma_interrupt(int irq, void *data)
8259{
8260 struct sdma_engine *sde = data;
8261 struct hfi1_devdata *dd = sde->dd;
8262 u64 status;
8263
8264#ifdef CONFIG_SDMA_VERBOSITY
8265 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8266 slashstrip(__FILE__), __LINE__, __func__);
8267 sdma_dumpstate(sde);
8268#endif
8269
8270 this_cpu_inc(*dd->int_counter);
8271
8272 /* This read_csr is really bad in the hot path */
8273 status = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008274 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8275 & sde->imask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008276 if (likely(status)) {
8277 /* clear the interrupt(s) */
8278 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008279 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8280 status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008281
8282 /* handle the interrupt(s) */
8283 sdma_engine_interrupt(sde, status);
Dennis Dalessandroee495ad2017-04-09 10:17:18 -07008284 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008285 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008286 sde->this_idx);
Dennis Dalessandroee495ad2017-04-09 10:17:18 -07008287 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04008288 return IRQ_HANDLED;
8289}
8290
8291/*
Dean Luickecd42f82016-02-03 14:35:14 -08008292 * Clear the receive interrupt. Use a read of the interrupt clear CSR
 8293 * to ensure that the write completed. This does NOT guarantee that
8294 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008295 */
8296static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8297{
8298 struct hfi1_devdata *dd = rcd->dd;
8299 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8300
8301 mmiowb(); /* make sure everything before is written */
8302 write_csr(dd, addr, rcd->imask);
8303 /* force the above write on the chip and get a value back */
8304 (void)read_csr(dd, addr);
8305}
8306
8307/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008308void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008309{
8310 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8311}
8312
Dean Luickecd42f82016-02-03 14:35:14 -08008313/*
8314 * Return non-zero if a packet is present.
8315 *
8316 * This routine is called when rechecking for packets after the RcvAvail
8317 * interrupt has been cleared down. First, do a quick check of memory for
8318 * a packet present. If not found, use an expensive CSR read of the context
8319 * tail to determine the actual tail. The CSR read is necessary because there
8320 * is no method to push pending DMAs to memory other than an interrupt and we
8321 * are trying to determine if we need to force an interrupt.
8322 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008323static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8324{
Dean Luickecd42f82016-02-03 14:35:14 -08008325 u32 tail;
8326 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008327
Dean Luickecd42f82016-02-03 14:35:14 -08008328 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8329 present = (rcd->seq_cnt ==
8330 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8331 else /* is RDMA rtail */
8332 present = (rcd->head != get_rcvhdrtail(rcd));
8333
8334 if (present)
8335 return 1;
8336
 8337	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8338 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8339 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008340}
8341
8342/*
8343 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8344 * This routine will try to handle packets immediately (latency), but if
 8345 * it finds too many, it will invoke the thread handler (bandwidth). The
Jubin John16733b82016-02-14 20:20:58 -08008346 * chip receive interrupt is *not* cleared down until this or the thread (if
Dean Luickf4f30031c2015-10-26 10:28:44 -04008347 * invoked) is finished. The intent is to avoid extra interrupts while we
8348 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008349 */
8350static irqreturn_t receive_context_interrupt(int irq, void *data)
8351{
8352 struct hfi1_ctxtdata *rcd = data;
8353 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008354 int disposition;
8355 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008356
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07008357 trace_hfi1_receive_interrupt(dd, rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008358 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008359 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008360
Dean Luickf4f30031c2015-10-26 10:28:44 -04008361 /* receive interrupt remains blocked while processing packets */
8362 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008363
Dean Luickf4f30031c2015-10-26 10:28:44 -04008364 /*
8365 * Too many packets were seen while processing packets in this
8366 * IRQ handler. Invoke the handler thread. The receive interrupt
8367 * remains blocked.
8368 */
8369 if (disposition == RCV_PKT_LIMIT)
8370 return IRQ_WAKE_THREAD;
8371
8372 /*
8373 * The packet processor detected no more packets. Clear the receive
 8374	 * interrupt and recheck for a packet that may have arrived
8375 * after the previous check and interrupt clear. If a packet arrived,
8376 * force another interrupt.
8377 */
8378 clear_recv_intr(rcd);
8379 present = check_packet_present(rcd);
8380 if (present)
8381 force_recv_intr(rcd);
8382
8383 return IRQ_HANDLED;
8384}
8385
8386/*
8387 * Receive packet thread handler. This expects to be invoked with the
8388 * receive interrupt still blocked.
8389 */
8390static irqreturn_t receive_context_thread(int irq, void *data)
8391{
8392 struct hfi1_ctxtdata *rcd = data;
8393 int present;
8394
8395 /* receive interrupt is still blocked from the IRQ handler */
8396 (void)rcd->do_interrupt(rcd, 1);
8397
8398 /*
8399 * The packet processor will only return if it detected no more
8400 * packets. Hold IRQs here so we can safely clear the interrupt and
8401 * recheck for a packet that may have arrived after the previous
8402 * check and the interrupt clear. If a packet arrived, force another
8403 * interrupt.
8404 */
8405 local_irq_disable();
8406 clear_recv_intr(rcd);
8407 present = check_packet_present(rcd);
8408 if (present)
8409 force_recv_intr(rcd);
8410 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008411
8412 return IRQ_HANDLED;
8413}
8414
8415/* ========================================================================= */
8416
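/* read the current physical link state, as reported by the 8051 */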
8417u32 read_physical_state(struct hfi1_devdata *dd)
8418{
8419 u64 reg;
8420
8421 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8422 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8423 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8424}
8425
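/* read the current logical link state from the DCC port config CSR */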
Jim Snowfb9036d2016-01-11 18:32:21 -05008426u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008427{
8428 u64 reg;
8429
8430 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8431 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8432 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8433}
8434
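/* set the logical link state field in DCC_CFG_PORT_CONFIG, leaving other bits intact */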
8435static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8436{
8437 u64 reg;
8438
8439 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8440 /* clear current state, set new state */
8441 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8442 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8443 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8444}
8445
8446/*
8447 * Use the 8051 to read a LCB CSR.
8448 */
8449static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8450{
8451 u32 regno;
8452 int ret;
8453
8454 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8455 if (acquire_lcb_access(dd, 0) == 0) {
8456 *data = read_csr(dd, addr);
8457 release_lcb_access(dd, 0);
8458 return 0;
8459 }
8460 return -EBUSY;
8461 }
8462
8463 /* register is an index of LCB registers: (offset - base) / 8 */
8464 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8465 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8466 if (ret != HCMD_SUCCESS)
8467 return -EBUSY;
8468 return 0;
8469}
8470
8471/*
Michael J. Ruhl86884262017-03-20 17:24:51 -07008472 * Provide a cache for some of the LCB registers in case the LCB is
8473 * unavailable.
8474 * (The LCB is unavailable in certain link states, for example.)
8475 */
8476struct lcb_datum {
8477 u32 off;
8478 u64 val;
8479};
8480
8481static struct lcb_datum lcb_cache[] = {
8482 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8483 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8484 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8485};
8486
8487static void update_lcb_cache(struct hfi1_devdata *dd)
8488{
8489 int i;
8490 int ret;
8491 u64 val;
8492
8493 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8494 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8495
8496 /* Update if we get good data */
8497 if (likely(ret != -EBUSY))
8498 lcb_cache[i].val = val;
8499 }
8500}
8501
8502static int read_lcb_cache(u32 off, u64 *val)
8503{
8504 int i;
8505
8506 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8507 if (lcb_cache[i].off == off) {
8508 *val = lcb_cache[i].val;
8509 return 0;
8510 }
8511 }
8512
8513 pr_warn("%s bad offset 0x%x\n", __func__, off);
8514 return -1;
8515}
8516
8517/*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008518 * Read an LCB CSR. Access may not be in host control, so check.
8519 * Return 0 on success, -EBUSY on failure.
8520 */
8521int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8522{
8523 struct hfi1_pportdata *ppd = dd->pport;
8524
8525 /* if up, go through the 8051 for the value */
8526 if (ppd->host_link_state & HLS_UP)
8527 return read_lcb_via_8051(dd, addr, data);
Michael J. Ruhl86884262017-03-20 17:24:51 -07008528 /* if going up or down, check the cache, otherwise, no access */
8529 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8530 if (read_lcb_cache(addr, data))
8531 return -EBUSY;
8532 return 0;
8533 }
8534
Mike Marciniszyn77241052015-07-30 15:17:43 -04008535 /* otherwise, host has access */
8536 *data = read_csr(dd, addr);
8537 return 0;
8538}
8539
8540/*
8541 * Use the 8051 to write a LCB CSR.
8542 */
8543static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8544{
Dean Luick3bf40d62015-11-06 20:07:04 -05008545 u32 regno;
8546 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008547
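	/* simulator, or DC firmware older than 0.20: write directly under host LCB access */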
Dean Luick3bf40d62015-11-06 20:07:04 -05008548 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008549 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
Dean Luick3bf40d62015-11-06 20:07:04 -05008550 if (acquire_lcb_access(dd, 0) == 0) {
8551 write_csr(dd, addr, data);
8552 release_lcb_access(dd, 0);
8553 return 0;
8554 }
8555 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008556 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008557
8558 /* register is an index of LCB registers: (offset - base) / 8 */
8559 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8560 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8561 if (ret != HCMD_SUCCESS)
8562 return -EBUSY;
8563 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008564}
8565
8566/*
8567 * Write an LCB CSR. Access may not be in host control, so check.
8568 * Return 0 on success, -EBUSY on failure.
8569 */
8570int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8571{
8572 struct hfi1_pportdata *ppd = dd->pport;
8573
8574 /* if up, go through the 8051 for the value */
8575 if (ppd->host_link_state & HLS_UP)
8576 return write_lcb_via_8051(dd, addr, data);
8577 /* if going up or down, no access */
8578 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8579 return -EBUSY;
8580 /* otherwise, host has access */
8581 write_csr(dd, addr, data);
8582 return 0;
8583}
8584
8585/*
8586 * Returns:
8587 * < 0 = Linux error, not able to get access
8588 * > 0 = 8051 command RETURN_CODE
8589 */
8590static int do_8051_command(
8591 struct hfi1_devdata *dd,
8592 u32 type,
8593 u64 in_data,
8594 u64 *out_data)
8595{
8596 u64 reg, completed;
8597 int return_code;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008598 unsigned long timeout;
8599
8600 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8601
Tadeusz Struk22546b72017-04-28 10:40:02 -07008602 mutex_lock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008603
8604 /* We can't send any commands to the 8051 if it's in reset */
8605 if (dd->dc_shutdown) {
8606 return_code = -ENODEV;
8607 goto fail;
8608 }
8609
8610 /*
8611 * If an 8051 host command timed out previously, then the 8051 is
8612 * stuck.
8613 *
8614 * On first timeout, attempt to reset and restart the entire DC
8615 * block (including 8051). (Is this too big of a hammer?)
8616 *
8617 * If the 8051 times out a second time, the reset did not bring it
8618 * back to healthy life. In that case, fail any subsequent commands.
8619 */
8620 if (dd->dc8051_timed_out) {
8621 if (dd->dc8051_timed_out > 1) {
8622 dd_dev_err(dd,
8623 "Previous 8051 host command timed out, skipping command %u\n",
8624 type);
8625 return_code = -ENXIO;
8626 goto fail;
8627 }
Tadeusz Struk22546b72017-04-28 10:40:02 -07008628 _dc_shutdown(dd);
8629 _dc_start(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008630 }
8631
8632 /*
8633 * If there is no timeout, then the 8051 command interface is
8634 * waiting for a command.
8635 */
8636
8637 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008638	 * When writing an LCB CSR, out_data contains the full value to
 8639	 * be written, while in_data contains the relative LCB
 8640	 * address in 7:0. Do the work here, rather than the caller,
 8641	 * of distributing the write data to where it needs to go:
8642 *
8643 * Write data
8644 * 39:00 -> in_data[47:8]
8645 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8646 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8647 */
8648 if (type == HCMD_WRITE_LCB_CSR) {
8649 in_data |= ((*out_data) & 0xffffffffffull) << 8;
Dean Luick00801672016-12-07 19:33:40 -08008650 /* must preserve COMPLETED - it is tied to hardware */
8651 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8652 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8653 reg |= ((((*out_data) >> 40) & 0xff) <<
Dean Luick3bf40d62015-11-06 20:07:04 -05008654 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8655 | ((((*out_data) >> 48) & 0xffff) <<
8656 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8657 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8658 }
8659
8660 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008661 * Do two writes: the first to stabilize the type and req_data, the
8662 * second to activate.
8663 */
8664 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8665 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8666 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8667 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8668 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8669 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8670 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8671
8672 /* wait for completion, alternate: interrupt */
8673 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8674 while (1) {
8675 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8676 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8677 if (completed)
8678 break;
8679 if (time_after(jiffies, timeout)) {
8680 dd->dc8051_timed_out++;
8681 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8682 if (out_data)
8683 *out_data = 0;
8684 return_code = -ETIMEDOUT;
8685 goto fail;
8686 }
8687 udelay(2);
8688 }
8689
8690 if (out_data) {
8691 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8692 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8693 if (type == HCMD_READ_LCB_CSR) {
8694 /* top 16 bits are in a different register */
8695 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8696 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8697 << (48
8698 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8699 }
8700 }
8701 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8702 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8703 dd->dc8051_timed_out = 0;
8704 /*
8705 * Clear command for next user.
8706 */
8707 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8708
8709fail:
Tadeusz Struk22546b72017-04-28 10:40:02 -07008710 mutex_unlock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008711 return return_code;
8712}
8713
8714static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8715{
8716 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8717}
8718
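/*
 * Write a 32-bit value into an 8051 firmware "register" using the
 * LOAD_CONFIG_DATA host command. Returns HCMD_SUCCESS on success,
 * otherwise a Linux errno or 8051 return code from do_8051_command().
 */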
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008719int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8720 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008721{
8722 u64 data;
8723 int ret;
8724
8725 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8726 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8727 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8728 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8729 if (ret != HCMD_SUCCESS) {
8730 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008731 "load 8051 config: field id %d, lane %d, err %d\n",
8732 (int)field_id, (int)lane_id, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008733 }
8734 return ret;
8735}
8736
8737/*
8738 * Read the 8051 firmware "registers". Use the RAM directly. Always
8739 * set the result, even on error.
8740 * Return 0 on success, -errno on failure
8741 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008742int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8743 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008744{
8745 u64 big_data;
8746 u32 addr;
8747 int ret;
8748
8749 /* address start depends on the lane_id */
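	/* lanes 0-3 follow the general fields; other ids (e.g. GENERAL_CONFIG) address the general fields at offset 0 */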
8750 if (lane_id < 4)
8751 addr = (4 * NUM_GENERAL_FIELDS)
8752 + (lane_id * 4 * NUM_LANE_FIELDS);
8753 else
8754 addr = 0;
8755 addr += field_id * 4;
8756
8757 /* read is in 8-byte chunks, hardware will truncate the address down */
8758 ret = read_8051_data(dd, addr, 8, &big_data);
8759
8760 if (ret == 0) {
8761 /* extract the 4 bytes we want */
8762 if (addr & 0x4)
8763 *result = (u32)(big_data >> 32);
8764 else
8765 *result = (u32)big_data;
8766 } else {
8767 *result = 0;
8768 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008769 __func__, lane_id, field_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008770 }
8771
8772 return ret;
8773}
8774
8775static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8776 u8 continuous)
8777{
8778 u32 frame;
8779
8780 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8781 | power_management << POWER_MANAGEMENT_SHIFT;
8782 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8783 GENERAL_CONFIG, frame);
8784}
8785
8786static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8787 u16 vl15buf, u8 crc_sizes)
8788{
8789 u32 frame;
8790
8791 frame = (u32)vau << VAU_SHIFT
8792 | (u32)z << Z_SHIFT
8793 | (u32)vcu << VCU_SHIFT
8794 | (u32)vl15buf << VL15BUF_SHIFT
8795 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8796 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8797 GENERAL_CONFIG, frame);
8798}
8799
8800static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8801 u8 *flag_bits, u16 *link_widths)
8802{
8803 u32 frame;
8804
8805 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008806 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008807 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8808 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8809 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8810}
8811
8812static int write_vc_local_link_width(struct hfi1_devdata *dd,
8813 u8 misc_bits,
8814 u8 flag_bits,
8815 u16 link_widths)
8816{
8817 u32 frame;
8818
8819 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8820 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8821 | (u32)link_widths << LINK_WIDTH_SHIFT;
8822 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8823 frame);
8824}
8825
8826static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8827 u8 device_rev)
8828{
8829 u32 frame;
8830
8831 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8832 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8833 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8834}
8835
8836static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8837 u8 *device_rev)
8838{
8839 u32 frame;
8840
8841 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8842 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8843 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8844 & REMOTE_DEVICE_REV_MASK;
8845}
8846
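/* program the host interface version field within the 8051's RESERVED_REGISTERS frame */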
Sebastian Sanchez913cc672017-07-29 08:44:01 -07008847int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8848{
8849 u32 frame;
8850 u32 mask;
8851
8852 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8853 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8854 /* Clear, then set field */
8855 frame &= ~mask;
8856 frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8857 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8858 frame);
8859}
8860
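/* read the 8051 firmware version: major/minor from MISC_STATUS, patch from VERSION_PATCH */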
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008861void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8862 u8 *ver_patch)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008863{
8864 u32 frame;
8865
8866 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008867 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8868 STS_FM_VERSION_MAJOR_MASK;
8869 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8870 STS_FM_VERSION_MINOR_MASK;
8871
8872 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8873 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8874 STS_FM_VERSION_PATCH_MASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008875}
8876
8877static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8878 u8 *continuous)
8879{
8880 u32 frame;
8881
8882 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8883 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8884 & POWER_MANAGEMENT_MASK;
8885 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8886 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8887}
8888
8889static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8890 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8891{
8892 u32 frame;
8893
8894 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8895 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8896 *z = (frame >> Z_SHIFT) & Z_MASK;
8897 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8898 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8899 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8900}
8901
8902static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8903 u8 *remote_tx_rate,
8904 u16 *link_widths)
8905{
8906 u32 frame;
8907
8908 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008909 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008910 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8911 & REMOTE_TX_RATE_MASK;
8912 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8913}
8914
8915static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8916{
8917 u32 frame;
8918
8919 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8920 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8921}
8922
8923static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8924{
8925 u32 frame;
8926
8927 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8928 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8929}
8930
8931static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8932{
8933 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8934}
8935
8936static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8937{
8938 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8939}
8940
8941void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8942{
8943 u32 frame;
8944 int ret;
8945
8946 *link_quality = 0;
8947 if (dd->pport->host_link_state & HLS_UP) {
8948 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008949 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008950 if (ret == 0)
8951 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8952 & LINK_QUALITY_MASK;
8953 }
8954}
8955
8956static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8957{
8958 u32 frame;
8959
8960 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8961 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8962}
8963
Dean Luickfeb831d2016-04-14 08:31:36 -07008964static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8965{
8966 u32 frame;
8967
8968 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8969 *ldr = (frame & 0xff);
8970}
8971
Mike Marciniszyn77241052015-07-30 15:17:43 -04008972static int read_tx_settings(struct hfi1_devdata *dd,
8973 u8 *enable_lane_tx,
8974 u8 *tx_polarity_inversion,
8975 u8 *rx_polarity_inversion,
8976 u8 *max_rate)
8977{
8978 u32 frame;
8979 int ret;
8980
8981 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8982 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8983 & ENABLE_LANE_TX_MASK;
8984 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8985 & TX_POLARITY_INVERSION_MASK;
8986 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8987 & RX_POLARITY_INVERSION_MASK;
8988 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8989 return ret;
8990}
8991
8992static int write_tx_settings(struct hfi1_devdata *dd,
8993 u8 enable_lane_tx,
8994 u8 tx_polarity_inversion,
8995 u8 rx_polarity_inversion,
8996 u8 max_rate)
8997{
8998 u32 frame;
8999
9000 /* no need to mask, all variable sizes match field widths */
9001 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9002 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9003 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9004 | max_rate << MAX_RATE_SHIFT;
9005 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9006}
9007
Mike Marciniszyn77241052015-07-30 15:17:43 -04009008/*
9009 * Read an idle LCB message.
9010 *
9011 * Returns 0 on success, -EINVAL on error
9012 */
9013static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9014{
9015 int ret;
9016
Jubin John17fb4f22016-02-14 20:21:52 -08009017 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009018 if (ret != HCMD_SUCCESS) {
9019 dd_dev_err(dd, "read idle message: type %d, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08009020 (u32)type, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009021 return -EINVAL;
9022 }
9023 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9024 /* return only the payload as we already know the type */
9025 *data_out >>= IDLE_PAYLOAD_SHIFT;
9026 return 0;
9027}
9028
9029/*
9030 * Read an idle SMA message. To be done in response to a notification from
9031 * the 8051.
9032 *
9033 * Returns 0 on success, -EINVAL on error
9034 */
9035static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9036{
Jubin John17fb4f22016-02-14 20:21:52 -08009037 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9038 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009039}
9040
9041/*
9042 * Send an idle LCB message.
9043 *
9044 * Returns 0 on success, -EINVAL on error
9045 */
9046static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9047{
9048 int ret;
9049
9050 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9051 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9052 if (ret != HCMD_SUCCESS) {
9053 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08009054 data, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009055 return -EINVAL;
9056 }
9057 return 0;
9058}
9059
9060/*
9061 * Send an idle SMA message.
9062 *
9063 * Returns 0 on success, -EINVAL on error
9064 */
9065int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9066{
9067 u64 data;
9068
Jubin John17fb4f22016-02-14 20:21:52 -08009069 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9070 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009071 return send_idle_message(dd, data);
9072}
9073
9074/*
9075 * Initialize the LCB then do a quick link up. This may or may not be
9076 * in loopback.
9077 *
9078 * return 0 on success, -errno on error
9079 */
9080static int do_quick_linkup(struct hfi1_devdata *dd)
9081{
Mike Marciniszyn77241052015-07-30 15:17:43 -04009082 int ret;
9083
9084 lcb_shutdown(dd, 0);
9085
9086 if (loopback) {
9087 /* LCB_CFG_LOOPBACK.VAL = 2 */
9088 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9089 write_csr(dd, DC_LCB_CFG_LOOPBACK,
Jubin John17fb4f22016-02-14 20:21:52 -08009090 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009091 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9092 }
9093
9094 /* start the LCBs */
9095 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9096 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9097
9098 /* simulator only loopback steps */
9099 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9100 /* LCB_CFG_RUN.EN = 1 */
9101 write_csr(dd, DC_LCB_CFG_RUN,
Jubin John17fb4f22016-02-14 20:21:52 -08009102 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009103
Dean Luickec8a1422017-03-20 17:24:39 -07009104 ret = wait_link_transfer_active(dd, 10);
9105 if (ret)
9106 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009107
9108 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
Jubin John17fb4f22016-02-14 20:21:52 -08009109 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009110 }
9111
9112 if (!loopback) {
9113 /*
9114 * When doing quick linkup and not in loopback, both
9115 * sides must be done with LCB set-up before either
9116 * starts the quick linkup. Put a delay here so that
9117 * both sides can be started and have a chance to be
9118 * done with LCB set up before resuming.
9119 */
9120 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009121 "Pausing for peer to be finished with LCB set up\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009122 msleep(5000);
Jubin John17fb4f22016-02-14 20:21:52 -08009123 dd_dev_err(dd, "Continuing with quick linkup\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009124 }
9125
9126 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9127 set_8051_lcb_access(dd);
9128
9129 /*
9130 * State "quick" LinkUp request sets the physical link state to
9131 * LinkUp without a verify capability sequence.
9132 * This state is in simulator v37 and later.
9133 */
9134 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9135 if (ret != HCMD_SUCCESS) {
9136 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009137 "%s: set physical link state to quick LinkUp failed with return %d\n",
9138 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009139
9140 set_host_lcb_access(dd);
9141 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9142
9143 if (ret >= 0)
9144 ret = -EINVAL;
9145 return ret;
9146 }
9147
9148 return 0; /* success */
9149}
9150
9151/*
9152 * Set the SerDes to internal loopback mode.
9153 * Returns 0 on success, -errno on error.
9154 */
9155static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9156{
9157 int ret;
9158
9159 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9160 if (ret == HCMD_SUCCESS)
9161 return 0;
9162 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009163 "Set physical link state to SerDes Loopback failed with return %d\n",
9164 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009165 if (ret >= 0)
9166 ret = -EINVAL;
9167 return ret;
9168}
9169
9170/*
9171 * Do all special steps to set up loopback.
9172 */
9173static int init_loopback(struct hfi1_devdata *dd)
9174{
9175 dd_dev_info(dd, "Entering loopback mode\n");
9176
9177 /* all loopbacks should disable self GUID check */
9178 write_csr(dd, DC_DC8051_CFG_MODE,
Jubin John17fb4f22016-02-14 20:21:52 -08009179 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009180
9181 /*
9182 * The simulator has only one loopback option - LCB. Switch
9183 * to that option, which includes quick link up.
9184 *
9185 * Accept all valid loopback values.
9186 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08009187 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9188 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9189 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009190 loopback = LOOPBACK_LCB;
9191 quick_linkup = 1;
9192 return 0;
9193 }
9194
9195 /* handle serdes loopback */
9196 if (loopback == LOOPBACK_SERDES) {
9197 /* internal serdes loopback needs quick linkup on RTL */
9198 if (dd->icode == ICODE_RTL_SILICON)
9199 quick_linkup = 1;
9200 return set_serdes_loopback_mode(dd);
9201 }
9202
9203 /* LCB loopback - handled at poll time */
9204 if (loopback == LOOPBACK_LCB) {
9205 quick_linkup = 1; /* LCB is always quick linkup */
9206
9207 /* not supported in emulation due to emulation RTL changes */
9208 if (dd->icode == ICODE_FPGA_EMULATION) {
9209 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009210 "LCB loopback not supported in emulation\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009211 return -EINVAL;
9212 }
9213 return 0;
9214 }
9215
9216 /* external cable loopback requires no extra steps */
9217 if (loopback == LOOPBACK_CABLE)
9218 return 0;
9219
9220 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9221 return -EINVAL;
9222}
9223
9224/*
9225 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9226 * used in the Verify Capability link width attribute.
9227 */
9228static u16 opa_to_vc_link_widths(u16 opa_widths)
9229{
9230 int i;
9231 u16 result = 0;
9232
9233 static const struct link_bits {
9234 u16 from;
9235 u16 to;
9236 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08009237 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9238 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9239 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9240 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04009241 };
9242
9243 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9244 if (opa_widths & opa_link_xlate[i].from)
9245 result |= opa_link_xlate[i].to;
9246 }
9247 return result;
9248}
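/*
 * Worked example (illustrative, not from the original source): an enabled
 * width of OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X maps through the table
 * above to VC bits 0 and 3, i.e. a result of 0x9.
 */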
9249
9250/*
9251 * Set link attributes before moving to polling.
9252 */
9253static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9254{
9255 struct hfi1_devdata *dd = ppd->dd;
9256 u8 enable_lane_tx;
9257 u8 tx_polarity_inversion;
9258 u8 rx_polarity_inversion;
9259 int ret;
9260
9261 /* reset our fabric serdes to clear any lingering problems */
9262 fabric_serdes_reset(dd);
9263
9264 /* set the local tx rate - need to read-modify-write */
9265 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009266 &rx_polarity_inversion, &ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009267 if (ret)
9268 goto set_local_link_attributes_fail;
9269
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07009270 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009271 /* set the tx rate to the fastest enabled */
9272 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9273 ppd->local_tx_rate = 1;
9274 else
9275 ppd->local_tx_rate = 0;
9276 } else {
9277 /* set the tx rate to all enabled */
9278 ppd->local_tx_rate = 0;
9279 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9280 ppd->local_tx_rate |= 2;
9281 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9282 ppd->local_tx_rate |= 1;
9283 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04009284
9285 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009286 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009287 rx_polarity_inversion, ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009288 if (ret != HCMD_SUCCESS)
9289 goto set_local_link_attributes_fail;
9290
9291 /*
9292 * DC supports continuous updates.
9293 */
Jubin John17fb4f22016-02-14 20:21:52 -08009294 ret = write_vc_local_phy(dd,
9295 0 /* no power management */,
9296 1 /* continuous updates */);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009297 if (ret != HCMD_SUCCESS)
9298 goto set_local_link_attributes_fail;
9299
9300 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9301 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9302 ppd->port_crc_mode_enabled);
9303 if (ret != HCMD_SUCCESS)
9304 goto set_local_link_attributes_fail;
9305
9306 ret = write_vc_local_link_width(dd, 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009307 opa_to_vc_link_widths(
9308 ppd->link_width_enabled));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009309 if (ret != HCMD_SUCCESS)
9310 goto set_local_link_attributes_fail;
9311
9312 /* let peer know who we are */
9313 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9314 if (ret == HCMD_SUCCESS)
9315 return 0;
9316
9317set_local_link_attributes_fail:
9318 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009319 "Failed to set local link attributes, return 0x%x\n",
9320 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009321 return ret;
9322}
9323
9324/*
Easwar Hariharan623bba22016-04-12 11:25:57 -07009325 * Call this to start the link.
9326 * Do not do anything if the link is disabled.
9327 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009328 */
9329int start_link(struct hfi1_pportdata *ppd)
9330{
Dean Luick0db9dec2016-09-06 04:35:20 -07009331 /*
9332 * Tune the SerDes to a ballpark setting for optimal signal and bit
9333 * error rate. Needs to be done before starting the link.
9334 */
9335 tune_serdes(ppd);
9336
Mike Marciniszyn77241052015-07-30 15:17:43 -04009337 if (!ppd->driver_link_ready) {
9338 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009339 "%s: stopping link start because driver is not ready\n",
9340 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009341 return 0;
9342 }
9343
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07009344 /*
9345 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9346 * pkey table can be configured properly if the HFI unit is connected
9347 * to switch port with MgmtAllowed=NO
9348 */
9349 clear_full_mgmt_pkey(ppd);
9350
Easwar Hariharan623bba22016-04-12 11:25:57 -07009351 return set_link_state(ppd, HLS_DN_POLL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009352}
9353
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009354static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9355{
9356 struct hfi1_devdata *dd = ppd->dd;
9357 u64 mask;
9358 unsigned long timeout;
9359
9360 /*
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009361 * Some QSFP cables have a quirk that asserts the IntN line as a side
9362 * effect of power up on plug-in. We ignore this false positive
9363 * interrupt until the module has finished powering up by waiting for
9364 * a minimum timeout of the module inrush initialization time of
9365 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9366 * module have stabilized.
9367 */
9368 msleep(500);
9369
9370 /*
9371 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009372 */
9373 timeout = jiffies + msecs_to_jiffies(2000);
9374 while (1) {
9375 mask = read_csr(dd, dd->hfi1_id ?
9376 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009377 if (!(mask & QSFP_HFI0_INT_N))
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009378 break;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009379 if (time_after(jiffies, timeout)) {
9380 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9381 __func__);
9382 break;
9383 }
9384 udelay(2);
9385 }
9386}
9387
9388static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9389{
9390 struct hfi1_devdata *dd = ppd->dd;
9391 u64 mask;
9392
9393 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009394 if (enable) {
9395 /*
9396 * Clear the status register to avoid an immediate interrupt
9397 * when we re-enable the IntN pin
9398 */
9399 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9400 QSFP_HFI0_INT_N);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009401 mask |= (u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009402 } else {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009403 mask &= ~(u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009404 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009405 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9406}
9407
9408void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009409{
9410 struct hfi1_devdata *dd = ppd->dd;
9411 u64 mask, qsfp_mask;
9412
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009413 /* Disable INT_N from triggering QSFP interrupts */
9414 set_qsfp_int_n(ppd, 0);
9415
9416 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009417 mask = (u64)QSFP_HFI0_RESET_N;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009418
9419 qsfp_mask = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009420 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009421 qsfp_mask &= ~mask;
9422 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009423 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009424
9425 udelay(10);
9426
9427 qsfp_mask |= mask;
9428 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009429 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009430
9431 wait_for_qsfp_init(ppd);
9432
9433 /*
9434 * Allow INT_N to trigger the QSFP interrupt to watch
9435 * for alarms and warnings
9436 */
9437 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009438}
9439
9440static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9441 u8 *qsfp_interrupt_status)
9442{
9443 struct hfi1_devdata *dd = ppd->dd;
9444
9445 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009446 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009447 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9448 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009449
9450 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009451 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009452 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9453 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009454
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009455 /*
9456 * The remaining alarms/warnings don't matter if the link is down.
9457 */
9458 if (ppd->host_link_state & HLS_DOWN)
9459 return 0;
9460
Mike Marciniszyn77241052015-07-30 15:17:43 -04009461 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009462 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009463 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9464 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009465
9466 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009467 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009468 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9469 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009470
9471 /* Byte 2 is vendor specific */
9472
9473 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009474 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009475 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9476 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009477
9478 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009479 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009480 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9481 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009482
9483 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009484 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009485 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9486 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009487
9488 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009489 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009490 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9491 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009492
9493 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009494 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009495 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9496 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009497
9498 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009499 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009500 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9501 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009502
9503 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009504 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009505 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9506 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009507
9508 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009509 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009510 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9511 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009512
9513 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009514 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009515 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9516 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009517
9518 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009519 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009520 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9521 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009522
9523 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009524 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009525 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9526 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009527
9528 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009529 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009530 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9531 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009532
9533 /* Bytes 9-10 and 11-12 are reserved */
9534 /* Bytes 13-15 are vendor specific */
9535
9536 return 0;
9537}
9538
Easwar Hariharan623bba22016-04-12 11:25:57 -07009539/* This routine will only be scheduled if the QSFP module-present signal is asserted */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009540void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009541{
9542 struct qsfp_data *qd;
9543 struct hfi1_pportdata *ppd;
9544 struct hfi1_devdata *dd;
9545
9546 qd = container_of(work, struct qsfp_data, qsfp_work);
9547 ppd = qd->ppd;
9548 dd = ppd->dd;
9549
9550 /* Sanity check */
9551 if (!qsfp_mod_present(ppd))
9552 return;
9553
Jan Sokolowski96603ed2017-07-29 08:43:26 -07009554 if (ppd->host_link_state == HLS_DN_DISABLE) {
9555 dd_dev_info(ppd->dd,
9556 "%s: stopping link start because link is disabled\n",
9557 __func__);
9558 return;
9559 }
9560
Mike Marciniszyn77241052015-07-30 15:17:43 -04009561 /*
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009562 * Turn DC back on after cable has been re-inserted. Up until
9563 * now, the DC has been in reset to save power.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009564 */
9565 dc_start(dd);
9566
9567 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009568 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009569
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009570 wait_for_qsfp_init(ppd);
9571
9572 /*
9573 * Allow INT_N to trigger the QSFP interrupt to watch
9574 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009575 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009576 set_qsfp_int_n(ppd, 1);
9577
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009578 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009579 }
9580
9581 if (qd->check_interrupt_flags) {
9582 u8 qsfp_interrupt_status[16] = {0,};
9583
Dean Luick765a6fa2016-03-05 08:50:06 -08009584 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9585 &qsfp_interrupt_status[0], 16) != 16) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009586 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009587 "%s: Failed to read status of QSFP module\n",
9588 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009589 } else {
9590 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009591
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009592 handle_qsfp_error_conditions(
9593 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009594 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9595 ppd->qsfp_info.check_interrupt_flags = 0;
9596 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08009597 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009598 }
9599 }
9600}
9601
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009602static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009603{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009604 struct hfi1_pportdata *ppd = dd->pport;
9605 u64 qsfp_mask, cce_int_mask;
9606 const int qsfp1_int_smask = QSFP1_INT % 64;
9607 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009608
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009609 /*
9610 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9611 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9612 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9613 * the index of the appropriate CSR in the CCEIntMask CSR array
9614 */
9615 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9616 (8 * (QSFP1_INT / 64)));
9617 if (dd->hfi1_id) {
9618 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9619 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9620 cce_int_mask);
9621 } else {
9622 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9623 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9624 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009625 }
9626
Mike Marciniszyn77241052015-07-30 15:17:43 -04009627 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9628 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009629 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9630 qsfp_mask);
9631 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9632 qsfp_mask);
9633
9634 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009635
9636 /* Handle active low nature of INT_N and MODPRST_N pins */
9637 if (qsfp_mod_present(ppd))
9638 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9639 write_csr(dd,
9640 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9641 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009642}
9643
Dean Luickbbdeb332015-12-01 15:38:15 -05009644/*
9645 * Do a one-time initialize of the LCB block.
9646 */
9647static void init_lcb(struct hfi1_devdata *dd)
9648{
Dean Luicka59329d2016-02-03 14:32:31 -08009649 /* simulator does not correctly handle LCB cclk loopback, skip */
9650 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9651 return;
9652
Dean Luickbbdeb332015-12-01 15:38:15 -05009653 /* the DC has been reset earlier in the driver load */
9654
9655 /* set LCB for cclk loopback on the port */
9656 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9657 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9658 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9659 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9660 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9661 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9662 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9663}
9664
Dean Luick673b9752016-08-31 07:24:33 -07009665/*
9666 * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9667 * on error.
9668 */
9669static int test_qsfp_read(struct hfi1_pportdata *ppd)
9670{
9671 int ret;
9672 u8 status;
9673
Easwar Hariharanfb897ad2017-03-20 17:25:42 -07009674 /*
9675 * Report success if not a QSFP or, if it is a QSFP, but the cable is
9676 * not present
9677 */
9678 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
Dean Luick673b9752016-08-31 07:24:33 -07009679 return 0;
9680
9681 /* read byte 2, the status byte */
9682 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9683 if (ret < 0)
9684 return ret;
9685 if (ret != 1)
9686 return -EIO;
9687
9688 return 0; /* success */
9689}
9690
9691/*
9692 * Values for QSFP retry.
9693 *
9694 * Give up after 10s (20 x 500ms). The overall timeout was empirically
9695 * arrived at from experience on a large cluster.
9696 */
9697#define MAX_QSFP_RETRIES 20
9698#define QSFP_RETRY_WAIT 500 /* msec */
9699
9700/*
9701 * Try a QSFP read. If it fails, schedule a retry for later.
9702 * Called on first link activation after driver load.
9703 */
9704static void try_start_link(struct hfi1_pportdata *ppd)
9705{
9706 if (test_qsfp_read(ppd)) {
9707 /* read failed */
9708 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9709 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9710 return;
9711 }
9712 dd_dev_info(ppd->dd,
9713 "QSFP not responding, waiting and retrying %d\n",
9714 (int)ppd->qsfp_retry_count);
9715 ppd->qsfp_retry_count++;
Sebastian Sanchez71d47002017-07-29 08:43:49 -07009716 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
Dean Luick673b9752016-08-31 07:24:33 -07009717 msecs_to_jiffies(QSFP_RETRY_WAIT));
9718 return;
9719 }
9720 ppd->qsfp_retry_count = 0;
9721
Dean Luick673b9752016-08-31 07:24:33 -07009722 start_link(ppd);
9723}
9724
9725/*
9726 * Workqueue function to start the link after a delay.
9727 */
9728void handle_start_link(struct work_struct *work)
9729{
9730 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9731 start_link_work.work);
9732 try_start_link(ppd);
9733}
9734
Mike Marciniszyn77241052015-07-30 15:17:43 -04009735int bringup_serdes(struct hfi1_pportdata *ppd)
9736{
9737 struct hfi1_devdata *dd = ppd->dd;
9738 u64 guid;
9739 int ret;
9740
9741 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9742 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9743
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07009744 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
Mike Marciniszyn77241052015-07-30 15:17:43 -04009745 if (!guid) {
9746 if (dd->base_guid)
9747 guid = dd->base_guid + ppd->port - 1;
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07009748 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009749 }
9750
Mike Marciniszyn77241052015-07-30 15:17:43 -04009751 /* Set linkinit_reason on power up per OPA spec */
9752 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9753
Dean Luickbbdeb332015-12-01 15:38:15 -05009754 /* one-time init of the LCB */
9755 init_lcb(dd);
9756
Mike Marciniszyn77241052015-07-30 15:17:43 -04009757 if (loopback) {
9758 ret = init_loopback(dd);
9759 if (ret < 0)
9760 return ret;
9761 }
9762
Easwar Hariharan9775a992016-05-12 10:22:39 -07009763 get_port_type(ppd);
9764 if (ppd->port_type == PORT_TYPE_QSFP) {
9765 set_qsfp_int_n(ppd, 0);
9766 wait_for_qsfp_init(ppd);
9767 set_qsfp_int_n(ppd, 1);
9768 }
9769
Dean Luick673b9752016-08-31 07:24:33 -07009770 try_start_link(ppd);
9771 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009772}
9773
9774void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9775{
9776 struct hfi1_devdata *dd = ppd->dd;
9777
9778 /*
9779 * Shut down the link and keep it down. First, clear the flag that
9780 * indicates the driver wants to allow the link to be up (driver_link_ready).
9781 * Then make sure the link is not automatically restarted
9782 * (link_enabled). Cancel any pending restart. And finally
9783 * go offline.
9784 */
9785 ppd->driver_link_ready = 0;
9786 ppd->link_enabled = 0;
9787
Dean Luick673b9752016-08-31 07:24:33 -07009788 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9789 flush_delayed_work(&ppd->start_link_work);
9790 cancel_delayed_work_sync(&ppd->start_link_work);
9791
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009792 ppd->offline_disabled_reason =
9793 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009794 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009795 OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009796 set_link_state(ppd, HLS_DN_OFFLINE);
9797
9798 /* disable the port */
9799 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9800}
9801
9802static inline int init_cpu_counters(struct hfi1_devdata *dd)
9803{
9804 struct hfi1_pportdata *ppd;
9805 int i;
9806
9807 ppd = (struct hfi1_pportdata *)(dd + 1);
9808 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009809 ppd->ibport_data.rvp.rc_acks = NULL;
9810 ppd->ibport_data.rvp.rc_qacks = NULL;
9811 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9812 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9813 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9814 if (!ppd->ibport_data.rvp.rc_acks ||
9815 !ppd->ibport_data.rvp.rc_delayed_comp ||
9816 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009817 return -ENOMEM;
9818 }
9819
9820 return 0;
9821}
9822
Mike Marciniszyn77241052015-07-30 15:17:43 -04009823/*
9824 * index is the index into the receive array
9825 */
9826void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9827 u32 type, unsigned long pa, u16 order)
9828{
9829 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009830
9831 if (!(dd->flags & HFI1_PRESENT))
9832 goto done;
9833
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07009834 if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009835 pa = 0;
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07009836 order = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009837 } else if (type > PT_INVALID) {
9838 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009839 "unexpected receive array type %u for index %u, not handled\n",
9840 type, index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009841 goto done;
9842 }
Mike Marciniszyn8cb10212017-06-09 15:59:59 -07009843 trace_hfi1_put_tid(dd, index, type, pa, order);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009844
9845#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9846 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9847 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9848 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9849 << RCV_ARRAY_RT_ADDR_SHIFT;
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07009850 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9851 writeq(reg, dd->rcvarray_wc + (index * 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009852
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07009853 if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009854 /*
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07009855 * Eager entries are written and flushed
9856 *
9857 * Expected entries are flushed every 4 writes
Mike Marciniszyn77241052015-07-30 15:17:43 -04009858 */
9859 flush_wc();
9860done:
9861 return;
9862}
9863
9864void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9865{
9866 struct hfi1_devdata *dd = rcd->dd;
9867 u32 i;
9868
9869 /* this could be optimized */
9870 for (i = rcd->eager_base; i < rcd->eager_base +
9871 rcd->egrbufs.alloced; i++)
9872 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9873
9874 for (i = rcd->expected_base;
9875 i < rcd->expected_base + rcd->expected_count; i++)
9876 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9877}
9878
Mike Marciniszyn77241052015-07-30 15:17:43 -04009879static const char * const ib_cfg_name_strings[] = {
9880 "HFI1_IB_CFG_LIDLMC",
9881 "HFI1_IB_CFG_LWID_DG_ENB",
9882 "HFI1_IB_CFG_LWID_ENB",
9883 "HFI1_IB_CFG_LWID",
9884 "HFI1_IB_CFG_SPD_ENB",
9885 "HFI1_IB_CFG_SPD",
9886 "HFI1_IB_CFG_RXPOL_ENB",
9887 "HFI1_IB_CFG_LREV_ENB",
9888 "HFI1_IB_CFG_LINKLATENCY",
9889 "HFI1_IB_CFG_HRTBT",
9890 "HFI1_IB_CFG_OP_VLS",
9891 "HFI1_IB_CFG_VL_HIGH_CAP",
9892 "HFI1_IB_CFG_VL_LOW_CAP",
9893 "HFI1_IB_CFG_OVERRUN_THRESH",
9894 "HFI1_IB_CFG_PHYERR_THRESH",
9895 "HFI1_IB_CFG_LINKDEFAULT",
9896 "HFI1_IB_CFG_PKEYS",
9897 "HFI1_IB_CFG_MTU",
9898 "HFI1_IB_CFG_LSTATE",
9899 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9900 "HFI1_IB_CFG_PMA_TICKS",
9901 "HFI1_IB_CFG_PORT"
9902};
9903
9904static const char *ib_cfg_name(int which)
9905{
9906 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9907 return "invalid";
9908 return ib_cfg_name_strings[which];
9909}
9910
9911int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9912{
9913 struct hfi1_devdata *dd = ppd->dd;
9914 int val = 0;
9915
9916 switch (which) {
9917 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9918 val = ppd->link_width_enabled;
9919 break;
9920 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9921 val = ppd->link_width_active;
9922 break;
9923 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9924 val = ppd->link_speed_enabled;
9925 break;
9926 case HFI1_IB_CFG_SPD: /* current Link speed */
9927 val = ppd->link_speed_active;
9928 break;
9929
9930 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9931 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9932 case HFI1_IB_CFG_LINKLATENCY:
9933 goto unimplemented;
9934
9935 case HFI1_IB_CFG_OP_VLS:
9936 val = ppd->vls_operational;
9937 break;
9938 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9939 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9940 break;
9941 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9942 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9943 break;
9944 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9945 val = ppd->overrun_threshold;
9946 break;
9947 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9948 val = ppd->phy_error_threshold;
9949 break;
9950 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9951 val = dd->link_default;
9952 break;
9953
9954 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9955 case HFI1_IB_CFG_PMA_TICKS:
9956 default:
9957unimplemented:
9958 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9959 dd_dev_info(
9960 dd,
9961 "%s: which %s: not implemented\n",
9962 __func__,
9963 ib_cfg_name(which));
9964 break;
9965 }
9966
9967 return val;
9968}
9969
9970/*
9971 * The largest MAD packet size.
9972 */
9973#define MAX_MAD_PACKET 2048
9974
9975/*
9976 * Return the maximum header bytes that can go on the _wire_
9977 * for this device. This count includes the ICRC which is
9978 * not part of the packet held in memory but it is appended
9979 * by the HW.
9980 * This is dependent on the device's receive header entry size.
9981 * HFI allows this to be set per-receive context, but the
9982 * driver presently enforces a global value.
9983 */
9984u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9985{
9986 /*
9987 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9988 * the Receive Header Entry Size minus the PBC (or RHF) size
9989 * plus one DW for the ICRC appended by HW.
9990 *
9991 * dd->rcd[0].rcvhdrqentsize is in DW.
9992 * We use rcd[0] as all context will have the same value. Also,
9993 * the first kernel context would have been allocated by now so
9994 * we are guaranteed a valid value.
9995 */
9996 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9997}
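/*
 * Worked example (illustrative): assuming a receive header entry size of
 * 32 DW, the maximum wire header is (32 - 2 + 1) * 4 = 124 bytes. The
 * 32 DW entry size is an assumption made only for this example, not a
 * value taken from this code.
 */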
9998
9999/*
10000 * Set Send Length
10001 * @ppd - per port data
10002 *
10003 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
10004 * registers compare against LRH.PktLen, so use the max bytes included
10005 * in the LRH.
10006 *
10007 * This routine changes all VL values except VL15, which it maintains at
10008 * the same value.
10009 */
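/*
 * Worked example (illustrative): for a VL MTU of 8192 bytes and a maximum
 * header of 124 bytes, the length check programmed below is
 * (8192 + 124) >> 2 = 2079 DW. Both numbers are assumptions chosen only
 * for the example.
 */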
10010static void set_send_length(struct hfi1_pportdata *ppd)
10011{
10012 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -050010013 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10014 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010015 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10016 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10017 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
Jubin Johnb4ba6632016-06-09 07:51:08 -070010018 int i, j;
Jianxin Xiong44306f12016-04-12 11:30:28 -070010019 u32 thres;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010020
10021 for (i = 0; i < ppd->vls_supported; i++) {
10022 if (dd->vld[i].mtu > maxvlmtu)
10023 maxvlmtu = dd->vld[i].mtu;
10024 if (i <= 3)
10025 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10026 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10027 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10028 else
10029 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10030 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10031 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10032 }
10033 write_csr(dd, SEND_LEN_CHECK0, len1);
10034 write_csr(dd, SEND_LEN_CHECK1, len2);
10035 /* adjust kernel credit return thresholds based on new MTUs */
10036 /* all kernel receive contexts have the same hdrqentsize */
10037 for (i = 0; i < ppd->vls_supported; i++) {
Jianxin Xiong44306f12016-04-12 11:30:28 -070010038 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10039 sc_mtu_to_threshold(dd->vld[i].sc,
10040 dd->vld[i].mtu,
Jubin John17fb4f22016-02-14 20:21:52 -080010041 dd->rcd[0]->rcvhdrqentsize));
Jubin Johnb4ba6632016-06-09 07:51:08 -070010042 for (j = 0; j < INIT_SC_PER_VL; j++)
10043 sc_set_cr_threshold(
10044 pio_select_send_context_vl(dd, j, i),
10045 thres);
Jianxin Xiong44306f12016-04-12 11:30:28 -070010046 }
10047 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10048 sc_mtu_to_threshold(dd->vld[15].sc,
10049 dd->vld[15].mtu,
10050 dd->rcd[0]->rcvhdrqentsize));
10051 sc_set_cr_threshold(dd->vld[15].sc, thres);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010052
10053 /* Adjust maximum MTU for the port in DC */
10054 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10055 (ilog2(maxvlmtu >> 8) + 1);
10056 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10057 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10058 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10059 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10060 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10061}
10062
10063static void set_lidlmc(struct hfi1_pportdata *ppd)
10064{
10065 int i;
10066 u64 sreg = 0;
10067 struct hfi1_devdata *dd = ppd->dd;
10068 u32 mask = ~((1U << ppd->lmc) - 1);
10069 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
Dasaratharaman Chandramouli51e658f52017-08-04 13:54:35 -070010070 u32 lid;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010071
Dasaratharaman Chandramouli51e658f52017-08-04 13:54:35 -070010072 /*
10073 * Program 0 in CSR if port lid is extended. This prevents
10074 * 9B packets being sent out for large lids.
10075 */
10076 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010077 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10078 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
Dasaratharaman Chandramouli51e658f52017-08-04 13:54:35 -070010079 c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
Jubin John8638b772016-02-14 20:19:24 -080010080 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
Mike Marciniszyn77241052015-07-30 15:17:43 -040010081 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10082 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10083 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10084
10085 /*
10086 * Iterate over all the send contexts and set their SLID check
10087 */
10088 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10089 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
Dasaratharaman Chandramouli51e658f52017-08-04 13:54:35 -070010090 (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040010091 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10092
10093 for (i = 0; i < dd->chip_send_contexts; i++) {
10094 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10095 i, (u32)sreg);
10096 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10097 }
10098
10099 /* Now we have to do the same thing for the sdma engines */
Dasaratharaman Chandramouli51e658f52017-08-04 13:54:35 -070010100 sdma_update_lmc(dd, mask, lid);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010101}
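/*
 * Example (illustrative): with an LMC of 2, the mask computed in
 * set_lidlmc() above is ~0x3, so the low two LID bits are wildcarded in
 * the DLID/SLID checks it programs.
 */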
10102
Dean Luick6854c692016-07-25 13:38:56 -070010103static const char *state_completed_string(u32 completed)
10104{
10105 static const char * const state_completed[] = {
10106 "EstablishComm",
10107 "OptimizeEQ",
10108 "VerifyCap"
10109 };
10110
10111 if (completed < ARRAY_SIZE(state_completed))
10112 return state_completed[completed];
10113
10114 return "unknown";
10115}
10116
10117static const char all_lanes_dead_timeout_expired[] =
10118 "All lanes were inactive – was the interconnect media removed?";
10119static const char tx_out_of_policy[] =
10120 "Passing lanes on local port do not meet the local link width policy";
10121static const char no_state_complete[] =
10122 "State timeout occurred before link partner completed the state";
10123static const char * const state_complete_reasons[] = {
10124 [0x00] = "Reason unknown",
10125 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10126 [0x02] = "Link partner reported failure",
10127 [0x10] = "Unable to achieve frame sync on any lane",
10128 [0x11] =
10129 "Unable to find a common bit rate with the link partner",
10130 [0x12] =
10131 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10132 [0x13] =
10133 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10134 [0x14] = no_state_complete,
10135 [0x15] =
10136 "State timeout occurred before link partner identified equalization presets",
10137 [0x16] =
10138 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10139 [0x17] = tx_out_of_policy,
10140 [0x20] = all_lanes_dead_timeout_expired,
10141 [0x21] =
10142 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10143 [0x22] = no_state_complete,
10144 [0x23] =
10145 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10146 [0x24] = tx_out_of_policy,
10147 [0x30] = all_lanes_dead_timeout_expired,
10148 [0x31] =
10149 "State timeout occurred waiting for host to process received frames",
10150 [0x32] = no_state_complete,
10151 [0x33] =
10152 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10153 [0x34] = tx_out_of_policy,
10154};
10155
10156static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10157 u32 code)
10158{
10159 const char *str = NULL;
10160
10161 if (code < ARRAY_SIZE(state_complete_reasons))
10162 str = state_complete_reasons[code];
10163
10164 if (str)
10165 return str;
10166 return "Reserved";
10167}
10168
10169/* describe the given last state complete frame */
10170static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10171 const char *prefix)
10172{
10173 struct hfi1_devdata *dd = ppd->dd;
10174 u32 success;
10175 u32 state;
10176 u32 reason;
10177 u32 lanes;
10178
10179 /*
10180 * Decode frame:
10181 * [ 0: 0] - success
10182 * [ 3: 1] - state
10183 * [ 7: 4] - next state timeout
10184 * [15: 8] - reason code
10185 * [31:16] - lanes
10186 */
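	/*
	 * Example (illustrative): a frame of 0x001f0205 decodes as
	 * success = 1, state = 2 (VerifyCap), reason = 0x02
	 * ("Link partner reported failure"), lanes = 0x001f.
	 */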
10187 success = frame & 0x1;
10188 state = (frame >> 1) & 0x7;
10189 reason = (frame >> 8) & 0xff;
10190 lanes = (frame >> 16) & 0xffff;
10191
10192 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10193 prefix, frame);
10194 dd_dev_err(dd, " last reported state: %s (0x%x)\n",
10195 state_completed_string(state), state);
10196 dd_dev_err(dd, " state successfully completed: %s\n",
10197 success ? "yes" : "no");
10198 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10199 reason, state_complete_reason_code_string(ppd, reason));
10200 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
10201}
10202
10203/*
10204 * Read the last state complete frames and explain them. This routine
10205 * expects to be called if the link went down during link negotiation
10206 * and initialization (LNI). That is, anywhere between polling and link up.
10207 */
10208static void check_lni_states(struct hfi1_pportdata *ppd)
10209{
10210 u32 last_local_state;
10211 u32 last_remote_state;
10212
10213 read_last_local_state(ppd->dd, &last_local_state);
10214 read_last_remote_state(ppd->dd, &last_remote_state);
10215
10216 /*
10217 * Don't report anything if there is nothing to report. A value of
10218 * 0 means the link was taken down while polling and there was no
10219 * training in-process.
10220 * training in progress.
10221 if (last_local_state == 0 && last_remote_state == 0)
10222 return;
10223
10224 decode_state_complete(ppd, last_local_state, "transmitted");
10225 decode_state_complete(ppd, last_remote_state, "received");
10226}
10227
Dean Luickec8a1422017-03-20 17:24:39 -070010228/* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10229static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10230{
10231 u64 reg;
10232 unsigned long timeout;
10233
10234 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10235 timeout = jiffies + msecs_to_jiffies(wait_ms);
10236 while (1) {
10237 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10238 if (reg)
10239 break;
10240 if (time_after(jiffies, timeout)) {
10241 dd_dev_err(dd,
10242 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10243 return -ETIMEDOUT;
10244 }
10245 udelay(2);
10246 }
10247 return 0;
10248}
10249
10250/* called when the logical link state is not down as it should be */
10251static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10252{
10253 struct hfi1_devdata *dd = ppd->dd;
10254
10255 /*
10256 * Bring link up in LCB loopback
10257 */
10258 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10259 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10260 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10261
10262 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10263 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10264 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10265 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10266
10267 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10268 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10269 udelay(3);
10270 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10271 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10272
10273 wait_link_transfer_active(dd, 100);
10274
10275 /*
10276 * Bring the link down again.
10277 */
10278 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10279 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10280 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10281
Byczkowski, Jakub02a222c2017-08-04 13:52:26 -070010282 /* adjust ppd->statusp, if needed */
10283 update_statusp(ppd, IB_PORT_DOWN);
10284
10285 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
Dean Luickec8a1422017-03-20 17:24:39 -070010286}
10287
Mike Marciniszyn77241052015-07-30 15:17:43 -040010288/*
10289 * Helper for set_link_state(). Do not call except from that routine.
10290 * Expects ppd->hls_mutex to be held.
10291 *
10292 * @rem_reason value to be sent to the neighbor
10293 *
10294 * LinkDownReasons only set if transition succeeds.
10295 */
10296static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10297{
10298 struct hfi1_devdata *dd = ppd->dd;
Sebastian Sanchez913cc672017-07-29 08:44:01 -070010299 u32 previous_state;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010300 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010301
Michael J. Ruhl86884262017-03-20 17:24:51 -070010302 update_lcb_cache(dd);
10303
Mike Marciniszyn77241052015-07-30 15:17:43 -040010304 previous_state = ppd->host_link_state;
10305 ppd->host_link_state = HLS_GOING_OFFLINE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010306
Sebastian Sanchez913cc672017-07-29 08:44:01 -070010307 /* start offline transition */
10308 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010309
Sebastian Sanchez913cc672017-07-29 08:44:01 -070010310 if (ret != HCMD_SUCCESS) {
10311 dd_dev_err(dd,
10312 "Failed to transition to Offline link state, return %d\n",
10313 ret);
10314 return -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010315 }
Sebastian Sanchez913cc672017-07-29 08:44:01 -070010316 if (ppd->offline_disabled_reason ==
10317 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10318 ppd->offline_disabled_reason =
10319 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010320
Sebastian Sanchez913cc672017-07-29 08:44:01 -070010321 /*
10322 * Wait for offline transition. It can take a while for
10323 * the link to go down.
10324 */
10325 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
10326 if (ret < 0)
10327 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010328
Mike Marciniszyn77241052015-07-30 15:17:43 -040010329 /*
10330 * Now in charge of LCB - must be after the physical state is
10331 * offline.quiet and before host_link_state is changed.
10332 */
10333 set_host_lcb_access(dd);
10334 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
Dean Luickec8a1422017-03-20 17:24:39 -070010335
10336 /* make sure the logical state is also down */
10337 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10338 if (ret)
10339 force_logical_link_state_down(ppd);
10340
Mike Marciniszyn77241052015-07-30 15:17:43 -040010341 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10342
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010343 if (ppd->port_type == PORT_TYPE_QSFP &&
10344 ppd->qsfp_info.limiting_active &&
10345 qsfp_mod_present(ppd)) {
Dean Luick765a6fa2016-03-05 08:50:06 -080010346 int ret;
10347
10348 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10349 if (ret == 0) {
10350 set_qsfp_tx(ppd, 0);
10351 release_chip_resource(dd, qsfp_resource(dd));
10352 } else {
10353 /* not fatal, but should warn */
10354 dd_dev_err(dd,
10355 "Unable to acquire lock to turn off QSFP TX\n");
10356 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010357 }
10358
Mike Marciniszyn77241052015-07-30 15:17:43 -040010359 /*
10360 * The LNI has a mandatory wait time after the physical state
10361 * moves to Offline.Quiet. The wait time may be different
10362 * depending on how the link went down. The 8051 firmware
10363 * will observe the needed wait time and only move to ready
10364 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -050010365 * is 6s, so wait that long and then at least 0.5s more for
10366 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -040010367 */
Dean Luick05087f3b2015-12-01 15:38:16 -050010368 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010369 if (ret) {
10370 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010371 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010372 /* state is really offline, so make it so */
10373 ppd->host_link_state = HLS_DN_OFFLINE;
10374 return ret;
10375 }
10376
10377 /*
10378 * The state is now offline and the 8051 is ready to accept host
10379 * requests.
10380 * - change our state
10381 * - notify others if we were previously in a linkup state
10382 */
10383 ppd->host_link_state = HLS_DN_OFFLINE;
10384 if (previous_state & HLS_UP) {
10385 /* went down while link was up */
10386 handle_linkup_change(dd, 0);
10387 } else if (previous_state
10388 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10389 /* went down while attempting link up */
Dean Luick6854c692016-07-25 13:38:56 -070010390 check_lni_states(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010391 }
10392
10393 /* the active link width (downgrade) is 0 on link down */
10394 ppd->link_width_active = 0;
10395 ppd->link_width_downgrade_tx_active = 0;
10396 ppd->link_width_downgrade_rx_active = 0;
10397 ppd->current_egress_rate = 0;
10398 return 0;
10399}
10400
10401/* return the link state name */
10402static const char *link_state_name(u32 state)
10403{
10404 const char *name;
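	/* state is a single HLS_* bit; ilog2() gives its __HLS_*_BP index into names[] */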
10405 int n = ilog2(state);
10406 static const char * const names[] = {
10407 [__HLS_UP_INIT_BP] = "INIT",
10408 [__HLS_UP_ARMED_BP] = "ARMED",
10409 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10410 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10411 [__HLS_DN_POLL_BP] = "POLL",
10412 [__HLS_DN_DISABLE_BP] = "DISABLE",
10413 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10414 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10415 [__HLS_GOING_UP_BP] = "GOING_UP",
10416 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10417 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10418 };
10419
10420 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10421 return name ? name : "unknown";
10422}
10423
10424/* return the link state reason name */
10425static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10426{
10427 if (state == HLS_UP_INIT) {
10428 switch (ppd->linkinit_reason) {
10429 case OPA_LINKINIT_REASON_LINKUP:
10430 return "(LINKUP)";
10431 case OPA_LINKINIT_REASON_FLAPPING:
10432 return "(FLAPPING)";
10433 case OPA_LINKINIT_OUTSIDE_POLICY:
10434 return "(OUTSIDE_POLICY)";
10435 case OPA_LINKINIT_QUARANTINED:
10436 return "(QUARANTINED)";
10437 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10438 return "(INSUFIC_CAPABILITY)";
10439 default:
10440 break;
10441 }
10442 }
10443 return "";
10444}
10445
10446/*
10447 * driver_physical_state - convert the driver's notion of a port's
10448 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10449 * Return -1 (converted to a u32) to indicate error.
10450 */
10451u32 driver_physical_state(struct hfi1_pportdata *ppd)
10452{
10453 switch (ppd->host_link_state) {
10454 case HLS_UP_INIT:
10455 case HLS_UP_ARMED:
10456 case HLS_UP_ACTIVE:
10457 return IB_PORTPHYSSTATE_LINKUP;
10458 case HLS_DN_POLL:
10459 return IB_PORTPHYSSTATE_POLLING;
10460 case HLS_DN_DISABLE:
10461 return IB_PORTPHYSSTATE_DISABLED;
10462 case HLS_DN_OFFLINE:
10463 return OPA_PORTPHYSSTATE_OFFLINE;
10464 case HLS_VERIFY_CAP:
10465 return IB_PORTPHYSSTATE_POLLING;
10466 case HLS_GOING_UP:
10467 return IB_PORTPHYSSTATE_POLLING;
10468 case HLS_GOING_OFFLINE:
10469 return OPA_PORTPHYSSTATE_OFFLINE;
10470 case HLS_LINK_COOLDOWN:
10471 return OPA_PORTPHYSSTATE_OFFLINE;
10472 case HLS_DN_DOWNDEF:
10473 default:
10474 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10475 ppd->host_link_state);
10476 return -1;
10477 }
10478}
10479
10480/*
Byczkowski, Jakub02a222c2017-08-04 13:52:26 -070010481 * driver_lstate - convert the driver's notion of a port's
Mike Marciniszyn77241052015-07-30 15:17:43 -040010482 * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
10483 * (converted to a u32) to indicate error.
10484 */
Byczkowski, Jakub02a222c2017-08-04 13:52:26 -070010485u32 driver_lstate(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040010486{
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -070010487 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010488 return IB_PORT_DOWN;
10489
10490 switch (ppd->host_link_state & HLS_UP) {
10491 case HLS_UP_INIT:
10492 return IB_PORT_INIT;
10493 case HLS_UP_ARMED:
10494 return IB_PORT_ARMED;
10495 case HLS_UP_ACTIVE:
10496 return IB_PORT_ACTIVE;
10497 default:
10498 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10499 ppd->host_link_state);
10500 return -1;
10501 }
10502}
10503
10504void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10505 u8 neigh_reason, u8 rem_reason)
10506{
10507 if (ppd->local_link_down_reason.latest == 0 &&
10508 ppd->neigh_link_down_reason.latest == 0) {
10509 ppd->local_link_down_reason.latest = lcl_reason;
10510 ppd->neigh_link_down_reason.latest = neigh_reason;
10511 ppd->remote_link_down_reason = rem_reason;
10512 }
10513}
10514
10515/*
Alex Estrin5e2d6762017-07-24 07:46:36 -070010516 * Verify that the buffer control table (BCT) credits for the data VLs are non-zero.
10517 */
10518static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10519{
10520 return !!ppd->actual_vls_operational;
10521}
10522
10523/*
Mike Marciniszyn77241052015-07-30 15:17:43 -040010524 * Change the physical and/or logical link state.
10525 *
10526 * Do not call this routine while inside an interrupt. It contains
10527 * calls to routines that can take multiple seconds to finish.
10528 *
10529 * Returns 0 on success, -errno on failure.
10530 */
10531int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10532{
10533 struct hfi1_devdata *dd = ppd->dd;
10534 struct ib_event event = {.device = NULL};
10535 int ret1, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010536 int orig_new_state, poll_bounce;
10537
10538 mutex_lock(&ppd->hls_lock);
10539
10540 orig_new_state = state;
10541 if (state == HLS_DN_DOWNDEF)
10542 state = dd->link_default;
10543
10544 /* interpret poll -> poll as a link bounce */
Jubin Johnd0d236e2016-02-14 20:20:15 -080010545 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10546 state == HLS_DN_POLL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010547
10548 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -080010549 link_state_name(ppd->host_link_state),
10550 link_state_name(orig_new_state),
10551 poll_bounce ? "(bounce) " : "",
10552 link_state_reason_name(ppd, state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010553
Mike Marciniszyn77241052015-07-30 15:17:43 -040010554 /*
10555 * If we're going to a (HLS_*) link state that implies the logical
10556 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10557 * reset is_sm_config_started to 0.
10558 */
10559 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10560 ppd->is_sm_config_started = 0;
10561
10562 /*
10563 * Do nothing if the states match. Let a poll to poll link bounce
10564 * go through.
10565 */
10566 if (ppd->host_link_state == state && !poll_bounce)
10567 goto done;
10568
10569 switch (state) {
10570 case HLS_UP_INIT:
Jubin Johnd0d236e2016-02-14 20:20:15 -080010571 if (ppd->host_link_state == HLS_DN_POLL &&
10572 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010573 /*
10574 * Quick link up jumps from polling to here.
10575 *
10576 * Whether in normal or loopback mode, the
10577 * simulator jumps from polling to link up.
10578 * Accept that here.
10579 */
Jubin John17fb4f22016-02-14 20:21:52 -080010580 /* OK */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010581 } else if (ppd->host_link_state != HLS_GOING_UP) {
10582 goto unexpected;
10583 }
10584
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010585 /*
10586 * Wait for Link_Up physical state.
 10587 * Physical and Logical states should already have
 10588 * transitioned to LinkUp and LinkInit respectively.
10589 */
10590 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10591 if (ret) {
10592 dd_dev_err(dd,
10593 "%s: physical state did not change to LINK-UP\n",
10594 __func__);
10595 break;
10596 }
10597
Mike Marciniszyn77241052015-07-30 15:17:43 -040010598 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10599 if (ret) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010600 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010601 "%s: logical state did not change to INIT\n",
10602 __func__);
Jan Sokolowski59ec8732017-07-24 07:46:18 -070010603 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010604 }
Jan Sokolowski59ec8732017-07-24 07:46:18 -070010605
10606 /* clear old transient LINKINIT_REASON code */
10607 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10608 ppd->linkinit_reason =
10609 OPA_LINKINIT_REASON_LINKUP;
10610
10611 /* enable the port */
10612 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10613
10614 handle_linkup_change(dd, 1);
10615 ppd->host_link_state = HLS_UP_INIT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010616 break;
10617 case HLS_UP_ARMED:
10618 if (ppd->host_link_state != HLS_UP_INIT)
10619 goto unexpected;
10620
Alex Estrin5e2d6762017-07-24 07:46:36 -070010621 if (!data_vls_operational(ppd)) {
10622 dd_dev_err(dd,
10623 "%s: data VLs not operational\n", __func__);
10624 ret = -EINVAL;
10625 break;
10626 }
10627
Mike Marciniszyn77241052015-07-30 15:17:43 -040010628 set_logical_state(dd, LSTATE_ARMED);
10629 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10630 if (ret) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010631 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010632 "%s: logical state did not change to ARMED\n",
10633 __func__);
Alex Estrin5efd40c2017-07-29 08:43:20 -070010634 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010635 }
Alex Estrin5efd40c2017-07-29 08:43:20 -070010636 ppd->host_link_state = HLS_UP_ARMED;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010637 /*
10638 * The simulator does not currently implement SMA messages,
10639 * so neighbor_normal is not set. Set it here when we first
10640 * move to Armed.
10641 */
10642 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10643 ppd->neighbor_normal = 1;
10644 break;
10645 case HLS_UP_ACTIVE:
10646 if (ppd->host_link_state != HLS_UP_ARMED)
10647 goto unexpected;
10648
Mike Marciniszyn77241052015-07-30 15:17:43 -040010649 set_logical_state(dd, LSTATE_ACTIVE);
10650 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10651 if (ret) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010652 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010653 "%s: logical state did not change to ACTIVE\n",
10654 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010655 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010656 /* tell all engines to go running */
10657 sdma_all_running(dd);
Alex Estrin5efd40c2017-07-29 08:43:20 -070010658 ppd->host_link_state = HLS_UP_ACTIVE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010659
 10660 /* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010661 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010662 event.element.port_num = ppd->port;
10663 event.event = IB_EVENT_PORT_ACTIVE;
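			/* dispatched below, after hls_lock is released */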
10664 }
10665 break;
10666 case HLS_DN_POLL:
10667 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10668 ppd->host_link_state == HLS_DN_OFFLINE) &&
10669 dd->dc_shutdown)
10670 dc_start(dd);
10671 /* Hand LED control to the DC */
10672 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10673
10674 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10675 u8 tmp = ppd->link_enabled;
10676
10677 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10678 if (ret) {
10679 ppd->link_enabled = tmp;
10680 break;
10681 }
10682 ppd->remote_link_down_reason = 0;
10683
10684 if (ppd->driver_link_ready)
10685 ppd->link_enabled = 1;
10686 }
10687
Jim Snowfb9036d2016-01-11 18:32:21 -050010688 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010689 ret = set_local_link_attributes(ppd);
10690 if (ret)
10691 break;
10692
10693 ppd->port_error_action = 0;
10694 ppd->host_link_state = HLS_DN_POLL;
10695
10696 if (quick_linkup) {
10697 /* quick linkup does not go into polling */
10698 ret = do_quick_linkup(dd);
10699 } else {
10700 ret1 = set_physical_link_state(dd, PLS_POLLING);
10701 if (ret1 != HCMD_SUCCESS) {
10702 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010703 "Failed to transition to Polling link state, return 0x%x\n",
10704 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010705 ret = -EINVAL;
10706 }
10707 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010708 ppd->offline_disabled_reason =
10709 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010710 /*
10711 * If an error occurred above, go back to offline. The
10712 * caller may reschedule another attempt.
10713 */
10714 if (ret)
10715 goto_offline(ppd, 0);
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010716 else
10717 cache_physical_state(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010718 break;
10719 case HLS_DN_DISABLE:
10720 /* link is disabled */
10721 ppd->link_enabled = 0;
10722
10723 /* allow any state to transition to disabled */
10724
10725 /* must transition to offline first */
10726 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10727 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10728 if (ret)
10729 break;
10730 ppd->remote_link_down_reason = 0;
10731 }
10732
Michael J. Ruhldb069ec2017-02-08 05:28:13 -080010733 if (!dd->dc_shutdown) {
10734 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10735 if (ret1 != HCMD_SUCCESS) {
10736 dd_dev_err(dd,
10737 "Failed to transition to Disabled link state, return 0x%x\n",
10738 ret1);
10739 ret = -EINVAL;
10740 break;
10741 }
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010742 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10743 if (ret) {
10744 dd_dev_err(dd,
10745 "%s: physical state did not change to DISABLED\n",
10746 __func__);
10747 break;
10748 }
Michael J. Ruhldb069ec2017-02-08 05:28:13 -080010749 dc_shutdown(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010750 }
10751 ppd->host_link_state = HLS_DN_DISABLE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010752 break;
10753 case HLS_DN_OFFLINE:
10754 if (ppd->host_link_state == HLS_DN_DISABLE)
10755 dc_start(dd);
10756
10757 /* allow any state to transition to offline */
10758 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10759 if (!ret)
10760 ppd->remote_link_down_reason = 0;
10761 break;
10762 case HLS_VERIFY_CAP:
10763 if (ppd->host_link_state != HLS_DN_POLL)
10764 goto unexpected;
10765 ppd->host_link_state = HLS_VERIFY_CAP;
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010766 cache_physical_state(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010767 break;
10768 case HLS_GOING_UP:
10769 if (ppd->host_link_state != HLS_VERIFY_CAP)
10770 goto unexpected;
10771
10772 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10773 if (ret1 != HCMD_SUCCESS) {
10774 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010775 "Failed to transition to link up state, return 0x%x\n",
10776 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010777 ret = -EINVAL;
10778 break;
10779 }
10780 ppd->host_link_state = HLS_GOING_UP;
10781 break;
10782
10783 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10784 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10785 default:
10786 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010787 __func__, state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010788 ret = -EINVAL;
10789 break;
10790 }
10791
Mike Marciniszyn77241052015-07-30 15:17:43 -040010792 goto done;
10793
10794unexpected:
10795 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010796 __func__, link_state_name(ppd->host_link_state),
10797 link_state_name(state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010798 ret = -EINVAL;
10799
10800done:
10801 mutex_unlock(&ppd->hls_lock);
10802
10803 if (event.device)
10804 ib_dispatch_event(&event);
10805
10806 return ret;
10807}
10808
10809int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10810{
10811 u64 reg;
10812 int ret = 0;
10813
10814 switch (which) {
10815 case HFI1_IB_CFG_LIDLMC:
10816 set_lidlmc(ppd);
10817 break;
10818 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10819 /*
10820 * The VL Arbitrator high limit is sent in units of 4k
10821 * bytes, while HFI stores it in units of 64 bytes.
10822 */
Jubin John8638b772016-02-14 20:19:24 -080010823 val *= 4096 / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010824 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10825 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10826 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10827 break;
10828 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10829 /* HFI only supports POLL as the default link down state */
10830 if (val != HLS_DN_POLL)
10831 ret = -EINVAL;
10832 break;
10833 case HFI1_IB_CFG_OP_VLS:
10834 if (ppd->vls_operational != val) {
10835 ppd->vls_operational = val;
10836 if (!ppd->port)
10837 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010838 }
10839 break;
10840 /*
10841 * For link width, link width downgrade, and speed enable, always AND
10842 * the setting with what is actually supported. This has two benefits.
10843 * First, enabled can't have unsupported values, no matter what the
10844 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10845 * "fill in with your supported value" have all the bits in the
10846 * field set, so simply ANDing with supported has the desired result.
10847 */
10848 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10849 ppd->link_width_enabled = val & ppd->link_width_supported;
10850 break;
10851 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10852 ppd->link_width_downgrade_enabled =
10853 val & ppd->link_width_downgrade_supported;
10854 break;
10855 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10856 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10857 break;
10858 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10859 /*
 10860 * HFI does not follow IB specs; save this value
10861 * so we can report it, if asked.
10862 */
10863 ppd->overrun_threshold = val;
10864 break;
10865 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10866 /*
 10867 * HFI does not follow IB specs; save this value
10868 * so we can report it, if asked.
10869 */
10870 ppd->phy_error_threshold = val;
10871 break;
10872
10873 case HFI1_IB_CFG_MTU:
10874 set_send_length(ppd);
10875 break;
10876
10877 case HFI1_IB_CFG_PKEYS:
10878 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10879 set_partition_keys(ppd);
10880 break;
10881
10882 default:
10883 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10884 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010885 "%s: which %s, val 0x%x: not implemented\n",
10886 __func__, ib_cfg_name(which), val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010887 break;
10888 }
10889 return ret;
10890}
10891
10892/* begin functions related to vl arbitration table caching */
10893static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10894{
10895 int i;
10896
10897 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10898 VL_ARB_LOW_PRIO_TABLE_SIZE);
10899 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10900 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10901
10902 /*
10903 * Note that we always return values directly from the
10904 * 'vl_arb_cache' (and do no CSR reads) in response to a
10905 * 'Get(VLArbTable)'. This is obviously correct after a
10906 * 'Set(VLArbTable)', since the cache will then be up to
10907 * date. But it's also correct prior to any 'Set(VLArbTable)'
10908 * since then both the cache, and the relevant h/w registers
10909 * will be zeroed.
10910 */
10911
10912 for (i = 0; i < MAX_PRIO_TABLE; i++)
10913 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10914}
10915
10916/*
10917 * vl_arb_lock_cache
10918 *
10919 * All other vl_arb_* functions should be called only after locking
10920 * the cache.
10921 */
10922static inline struct vl_arb_cache *
10923vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10924{
10925 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10926 return NULL;
10927 spin_lock(&ppd->vl_arb_cache[idx].lock);
10928 return &ppd->vl_arb_cache[idx];
10929}
10930
10931static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10932{
10933 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10934}
10935
10936static void vl_arb_get_cache(struct vl_arb_cache *cache,
10937 struct ib_vl_weight_elem *vl)
10938{
10939 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10940}
10941
10942static void vl_arb_set_cache(struct vl_arb_cache *cache,
10943 struct ib_vl_weight_elem *vl)
10944{
10945 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10946}
10947
10948static int vl_arb_match_cache(struct vl_arb_cache *cache,
10949 struct ib_vl_weight_elem *vl)
10950{
10951 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10952}
Jubin Johnf4d507c2016-02-14 20:20:25 -080010953
Mike Marciniszyn77241052015-07-30 15:17:43 -040010954/* end functions related to vl arbitration table caching */
10955
10956static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10957 u32 size, struct ib_vl_weight_elem *vl)
10958{
10959 struct hfi1_devdata *dd = ppd->dd;
10960 u64 reg;
10961 unsigned int i, is_up = 0;
10962 int drain, ret = 0;
10963
10964 mutex_lock(&ppd->hls_lock);
10965
10966 if (ppd->host_link_state & HLS_UP)
10967 is_up = 1;
10968
10969 drain = !is_ax(dd) && is_up;
10970
10971 if (drain)
10972 /*
10973 * Before adjusting VL arbitration weights, empty per-VL
10974 * FIFOs, otherwise a packet whose VL weight is being
10975 * set to 0 could get stuck in a FIFO with no chance to
10976 * egress.
10977 */
10978 ret = stop_drain_data_vls(dd);
10979
10980 if (ret) {
10981 dd_dev_err(
10982 dd,
10983 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10984 __func__);
10985 goto err;
10986 }
10987
10988 for (i = 0; i < size; i++, vl++) {
10989 /*
10990 * NOTE: The low priority shift and mask are used here, but
10991 * they are the same for both the low and high registers.
10992 */
10993 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10994 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10995 | (((u64)vl->weight
10996 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10997 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10998 write_csr(dd, target + (i * 8), reg);
10999 }
11000 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11001
11002 if (drain)
11003 open_fill_data_vls(dd); /* reopen all VLs */
11004
11005err:
11006 mutex_unlock(&ppd->hls_lock);
11007
11008 return ret;
11009}
11010
11011/*
11012 * Read one credit merge VL register.
11013 */
11014static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11015 struct vl_limit *vll)
11016{
11017 u64 reg = read_csr(dd, csr);
11018
11019 vll->dedicated = cpu_to_be16(
11020 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11021 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11022 vll->shared = cpu_to_be16(
11023 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11024 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11025}
11026
11027/*
11028 * Read the current credit merge limits.
11029 */
11030static int get_buffer_control(struct hfi1_devdata *dd,
11031 struct buffer_control *bc, u16 *overall_limit)
11032{
11033 u64 reg;
11034 int i;
11035
11036 /* not all entries are filled in */
11037 memset(bc, 0, sizeof(*bc));
11038
11039 /* OPA and HFI have a 1-1 mapping */
11040 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080011041 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011042
11043 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11044 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11045
11046 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11047 bc->overall_shared_limit = cpu_to_be16(
11048 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11049 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11050 if (overall_limit)
11051 *overall_limit = (reg
11052 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11053 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11054 return sizeof(struct buffer_control);
11055}
11056
11057static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11058{
11059 u64 reg;
11060 int i;
11061
11062 /* each register contains 16 SC->VLnt mappings, 4 bits each */
11063 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11064 for (i = 0; i < sizeof(u64); i++) {
11065 u8 byte = *(((u8 *)&reg) + i);
11066
11067 dp->vlnt[2 * i] = byte & 0xf;
11068 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11069 }
11070
11071 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11072 for (i = 0; i < sizeof(u64); i++) {
11073 u8 byte = *(((u8 *)&reg) + i);
11074
11075 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11076 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11077 }
11078 return sizeof(struct sc2vlnt);
11079}
11080
11081static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11082 struct ib_vl_weight_elem *vl)
11083{
11084 unsigned int i;
11085
11086 for (i = 0; i < nelems; i++, vl++) {
11087 vl->vl = 0xf;
11088 vl->weight = 0;
11089 }
11090}
11091
11092static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11093{
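	/* pack the 32 SC->VLnt entries, 4 bits each, into the two mapping CSRs */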
11094 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
Jubin John17fb4f22016-02-14 20:21:52 -080011095 DC_SC_VL_VAL(15_0,
11096 0, dp->vlnt[0] & 0xf,
11097 1, dp->vlnt[1] & 0xf,
11098 2, dp->vlnt[2] & 0xf,
11099 3, dp->vlnt[3] & 0xf,
11100 4, dp->vlnt[4] & 0xf,
11101 5, dp->vlnt[5] & 0xf,
11102 6, dp->vlnt[6] & 0xf,
11103 7, dp->vlnt[7] & 0xf,
11104 8, dp->vlnt[8] & 0xf,
11105 9, dp->vlnt[9] & 0xf,
11106 10, dp->vlnt[10] & 0xf,
11107 11, dp->vlnt[11] & 0xf,
11108 12, dp->vlnt[12] & 0xf,
11109 13, dp->vlnt[13] & 0xf,
11110 14, dp->vlnt[14] & 0xf,
11111 15, dp->vlnt[15] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011112 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
Jubin John17fb4f22016-02-14 20:21:52 -080011113 DC_SC_VL_VAL(31_16,
11114 16, dp->vlnt[16] & 0xf,
11115 17, dp->vlnt[17] & 0xf,
11116 18, dp->vlnt[18] & 0xf,
11117 19, dp->vlnt[19] & 0xf,
11118 20, dp->vlnt[20] & 0xf,
11119 21, dp->vlnt[21] & 0xf,
11120 22, dp->vlnt[22] & 0xf,
11121 23, dp->vlnt[23] & 0xf,
11122 24, dp->vlnt[24] & 0xf,
11123 25, dp->vlnt[25] & 0xf,
11124 26, dp->vlnt[26] & 0xf,
11125 27, dp->vlnt[27] & 0xf,
11126 28, dp->vlnt[28] & 0xf,
11127 29, dp->vlnt[29] & 0xf,
11128 30, dp->vlnt[30] & 0xf,
11129 31, dp->vlnt[31] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011130}
11131
11132static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11133 u16 limit)
11134{
11135 if (limit != 0)
11136 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011137 what, (int)limit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011138}
11139
 11140/* change only the shared limit portion of SendCmGlobalCredit */
11141static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11142{
11143 u64 reg;
11144
11145 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11146 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11147 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11148 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11149}
11150
 11151/* change only the total credit limit portion of SendCmGlobalCredit */
11152static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11153{
11154 u64 reg;
11155
11156 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11157 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11158 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11159 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11160}
11161
11162/* set the given per-VL shared limit */
11163static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11164{
11165 u64 reg;
11166 u32 addr;
11167
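	/* data VLs are an array of CSRs with an 8-byte stride; VL15 has its own CSR */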
11168 if (vl < TXE_NUM_DATA_VL)
11169 addr = SEND_CM_CREDIT_VL + (8 * vl);
11170 else
11171 addr = SEND_CM_CREDIT_VL15;
11172
11173 reg = read_csr(dd, addr);
11174 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11175 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11176 write_csr(dd, addr, reg);
11177}
11178
11179/* set the given per-VL dedicated limit */
11180static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11181{
11182 u64 reg;
11183 u32 addr;
11184
11185 if (vl < TXE_NUM_DATA_VL)
11186 addr = SEND_CM_CREDIT_VL + (8 * vl);
11187 else
11188 addr = SEND_CM_CREDIT_VL15;
11189
11190 reg = read_csr(dd, addr);
11191 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11192 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11193 write_csr(dd, addr, reg);
11194}
11195
11196/* spin until the given per-VL status mask bits clear */
11197static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11198 const char *which)
11199{
11200 unsigned long timeout;
11201 u64 reg;
11202
11203 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11204 while (1) {
11205 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11206
11207 if (reg == 0)
11208 return; /* success */
11209 if (time_after(jiffies, timeout))
11210 break; /* timed out */
11211 udelay(1);
11212 }
11213
11214 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011215 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11216 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011217 /*
11218 * If this occurs, it is likely there was a credit loss on the link.
11219 * The only recovery from that is a link bounce.
11220 */
11221 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011222 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011223}
11224
11225/*
11226 * The number of credits on the VLs may be changed while everything
11227 * is "live", but the following algorithm must be followed due to
11228 * how the hardware is actually implemented. In particular,
11229 * Return_Credit_Status[] is the only correct status check.
11230 *
11231 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11232 * set Global_Shared_Credit_Limit = 0
11233 * use_all_vl = 1
11234 * mask0 = all VLs that are changing either dedicated or shared limits
11235 * set Shared_Limit[mask0] = 0
11236 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11237 * if (changing any dedicated limit)
11238 * mask1 = all VLs that are lowering dedicated limits
11239 * lower Dedicated_Limit[mask1]
11240 * spin until Return_Credit_Status[mask1] == 0
11241 * raise Dedicated_Limits
11242 * raise Shared_Limits
11243 * raise Global_Shared_Credit_Limit
11244 *
11245 * lower = if the new limit is lower, set the limit to the new value
11246 * raise = if the new limit is higher than the current value (may be changed
11247 * earlier in the algorithm), set the new limit to the new value
11248 */
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011249int set_buffer_control(struct hfi1_pportdata *ppd,
11250 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011251{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011252 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011253 u64 changing_mask, ld_mask, stat_mask;
11254 int change_count;
11255 int i, use_all_mask;
11256 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011257 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011258 /*
11259 * A0: add the variable any_shared_limit_changing below and in the
11260 * algorithm above. If removing A0 support, it can be removed.
11261 */
11262 int any_shared_limit_changing;
11263 struct buffer_control cur_bc;
11264 u8 changing[OPA_MAX_VLS];
11265 u8 lowering_dedicated[OPA_MAX_VLS];
11266 u16 cur_total;
11267 u32 new_total = 0;
11268 const u64 all_mask =
11269 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11270 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11271 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11272 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11273 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11274 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11275 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11276 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11277 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11278
11279#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11280#define NUM_USABLE_VLS 16 /* look at VL15 and less */
11281
Mike Marciniszyn77241052015-07-30 15:17:43 -040011282 /* find the new total credits, do sanity check on unused VLs */
11283 for (i = 0; i < OPA_MAX_VLS; i++) {
11284 if (valid_vl(i)) {
11285 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11286 continue;
11287 }
11288 nonzero_msg(dd, i, "dedicated",
Jubin John17fb4f22016-02-14 20:21:52 -080011289 be16_to_cpu(new_bc->vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011290 nonzero_msg(dd, i, "shared",
Jubin John17fb4f22016-02-14 20:21:52 -080011291 be16_to_cpu(new_bc->vl[i].shared));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011292 new_bc->vl[i].dedicated = 0;
11293 new_bc->vl[i].shared = 0;
11294 }
11295 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050011296
Mike Marciniszyn77241052015-07-30 15:17:43 -040011297 /* fetch the current values */
11298 get_buffer_control(dd, &cur_bc, &cur_total);
11299
11300 /*
11301 * Create the masks we will use.
11302 */
11303 memset(changing, 0, sizeof(changing));
11304 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
Jubin John4d114fd2016-02-14 20:21:43 -080011305 /*
11306 * NOTE: Assumes that the individual VL bits are adjacent and in
11307 * increasing order
11308 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011309 stat_mask =
11310 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11311 changing_mask = 0;
11312 ld_mask = 0;
11313 change_count = 0;
11314 any_shared_limit_changing = 0;
11315 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11316 if (!valid_vl(i))
11317 continue;
11318 this_shared_changing = new_bc->vl[i].shared
11319 != cur_bc.vl[i].shared;
11320 if (this_shared_changing)
11321 any_shared_limit_changing = 1;
Jubin Johnd0d236e2016-02-14 20:20:15 -080011322 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11323 this_shared_changing) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011324 changing[i] = 1;
11325 changing_mask |= stat_mask;
11326 change_count++;
11327 }
11328 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11329 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11330 lowering_dedicated[i] = 1;
11331 ld_mask |= stat_mask;
11332 }
11333 }
11334
11335 /* bracket the credit change with a total adjustment */
11336 if (new_total > cur_total)
11337 set_global_limit(dd, new_total);
11338
11339 /*
11340 * Start the credit change algorithm.
11341 */
11342 use_all_mask = 0;
11343 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011344 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11345 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011346 set_global_shared(dd, 0);
11347 cur_bc.overall_shared_limit = 0;
11348 use_all_mask = 1;
11349 }
11350
11351 for (i = 0; i < NUM_USABLE_VLS; i++) {
11352 if (!valid_vl(i))
11353 continue;
11354
11355 if (changing[i]) {
11356 set_vl_shared(dd, i, 0);
11357 cur_bc.vl[i].shared = 0;
11358 }
11359 }
11360
11361 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
Jubin John17fb4f22016-02-14 20:21:52 -080011362 "shared");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011363
11364 if (change_count > 0) {
11365 for (i = 0; i < NUM_USABLE_VLS; i++) {
11366 if (!valid_vl(i))
11367 continue;
11368
11369 if (lowering_dedicated[i]) {
11370 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011371 be16_to_cpu(new_bc->
11372 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011373 cur_bc.vl[i].dedicated =
11374 new_bc->vl[i].dedicated;
11375 }
11376 }
11377
11378 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11379
11380 /* now raise all dedicated that are going up */
11381 for (i = 0; i < NUM_USABLE_VLS; i++) {
11382 if (!valid_vl(i))
11383 continue;
11384
11385 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11386 be16_to_cpu(cur_bc.vl[i].dedicated))
11387 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011388 be16_to_cpu(new_bc->
11389 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011390 }
11391 }
11392
11393 /* next raise all shared that are going up */
11394 for (i = 0; i < NUM_USABLE_VLS; i++) {
11395 if (!valid_vl(i))
11396 continue;
11397
11398 if (be16_to_cpu(new_bc->vl[i].shared) >
11399 be16_to_cpu(cur_bc.vl[i].shared))
11400 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11401 }
11402
11403 /* finally raise the global shared */
11404 if (be16_to_cpu(new_bc->overall_shared_limit) >
Jubin John17fb4f22016-02-14 20:21:52 -080011405 be16_to_cpu(cur_bc.overall_shared_limit))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011406 set_global_shared(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011407 be16_to_cpu(new_bc->overall_shared_limit));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011408
11409 /* bracket the credit change with a total adjustment */
11410 if (new_total < cur_total)
11411 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011412
11413 /*
11414 * Determine the actual number of operational VLS using the number of
11415 * dedicated and shared credits for each VL.
11416 */
11417 if (change_count > 0) {
11418 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11419 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11420 be16_to_cpu(new_bc->vl[i].shared) > 0)
11421 vl_count++;
11422 ppd->actual_vls_operational = vl_count;
11423 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11424 ppd->actual_vls_operational :
11425 ppd->vls_operational,
11426 NULL);
11427 if (ret == 0)
11428 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11429 ppd->actual_vls_operational :
11430 ppd->vls_operational, NULL);
11431 if (ret)
11432 return ret;
11433 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011434 return 0;
11435}
11436
11437/*
11438 * Read the given fabric manager table. Return the size of the
11439 * table (in bytes) on success, and a negative error code on
11440 * failure.
11441 */
11442int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11443
11444{
11445 int size;
11446 struct vl_arb_cache *vlc;
11447
11448 switch (which) {
11449 case FM_TBL_VL_HIGH_ARB:
11450 size = 256;
11451 /*
11452 * OPA specifies 128 elements (of 2 bytes each), though
11453 * HFI supports only 16 elements in h/w.
11454 */
11455 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11456 vl_arb_get_cache(vlc, t);
11457 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11458 break;
11459 case FM_TBL_VL_LOW_ARB:
11460 size = 256;
11461 /*
11462 * OPA specifies 128 elements (of 2 bytes each), though
11463 * HFI supports only 16 elements in h/w.
11464 */
11465 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11466 vl_arb_get_cache(vlc, t);
11467 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11468 break;
11469 case FM_TBL_BUFFER_CONTROL:
11470 size = get_buffer_control(ppd->dd, t, NULL);
11471 break;
11472 case FM_TBL_SC2VLNT:
11473 size = get_sc2vlnt(ppd->dd, t);
11474 break;
11475 case FM_TBL_VL_PREEMPT_ELEMS:
11476 size = 256;
11477 /* OPA specifies 128 elements, of 2 bytes each */
11478 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11479 break;
11480 case FM_TBL_VL_PREEMPT_MATRIX:
11481 size = 256;
11482 /*
11483 * OPA specifies that this is the same size as the VL
11484 * arbitration tables (i.e., 256 bytes).
11485 */
11486 break;
11487 default:
11488 return -EINVAL;
11489 }
11490 return size;
11491}
11492
11493/*
11494 * Write the given fabric manager table.
11495 */
11496int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11497{
11498 int ret = 0;
11499 struct vl_arb_cache *vlc;
11500
11501 switch (which) {
11502 case FM_TBL_VL_HIGH_ARB:
11503 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11504 if (vl_arb_match_cache(vlc, t)) {
11505 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11506 break;
11507 }
11508 vl_arb_set_cache(vlc, t);
11509 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11510 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11511 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11512 break;
11513 case FM_TBL_VL_LOW_ARB:
11514 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11515 if (vl_arb_match_cache(vlc, t)) {
11516 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11517 break;
11518 }
11519 vl_arb_set_cache(vlc, t);
11520 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11521 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11522 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11523 break;
11524 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011525 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011526 break;
11527 case FM_TBL_SC2VLNT:
11528 set_sc2vlnt(ppd->dd, t);
11529 break;
11530 default:
11531 ret = -EINVAL;
11532 }
11533 return ret;
11534}
11535
11536/*
11537 * Disable all data VLs.
11538 *
11539 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11540 */
11541static int disable_data_vls(struct hfi1_devdata *dd)
11542{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011543 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011544 return 1;
11545
11546 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11547
11548 return 0;
11549}
11550
11551/*
11552 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11553 * Just re-enables all data VLs (the "fill" part happens
11554 * automatically - the name was chosen for symmetry with
11555 * stop_drain_data_vls()).
11556 *
11557 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11558 */
11559int open_fill_data_vls(struct hfi1_devdata *dd)
11560{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011561 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011562 return 1;
11563
11564 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11565
11566 return 0;
11567}
11568
11569/*
 11570 * drain_data_vls() - assumes that disable_data_vls() has been called,
 11571 * then waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
 11572 * engines to drop to 0.
11573 */
11574static void drain_data_vls(struct hfi1_devdata *dd)
11575{
11576 sc_wait(dd);
11577 sdma_wait(dd);
11578 pause_for_credit_return(dd);
11579}
11580
11581/*
11582 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11583 *
11584 * Use open_fill_data_vls() to resume using data VLs. This pair is
11585 * meant to be used like this:
11586 *
11587 * stop_drain_data_vls(dd);
11588 * // do things with per-VL resources
11589 * open_fill_data_vls(dd);
11590 */
11591int stop_drain_data_vls(struct hfi1_devdata *dd)
11592{
11593 int ret;
11594
11595 ret = disable_data_vls(dd);
11596 if (ret == 0)
11597 drain_data_vls(dd);
11598
11599 return ret;
11600}
11601
11602/*
11603 * Convert a nanosecond time to a cclock count. No matter how slow
11604 * the cclock, a non-zero ns will always have a non-zero result.
11605 */
11606u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11607{
11608 u32 cclocks;
11609
11610 if (dd->icode == ICODE_FPGA_EMULATION)
11611 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11612 else /* simulation pretends to be ASIC */
11613 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11614 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11615 cclocks = 1;
11616 return cclocks;
11617}
11618
11619/*
 11620 * Convert a cclock count to nanoseconds. No matter how slow
11621 * the cclock, a non-zero cclocks will always have a non-zero result.
11622 */
11623u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11624{
11625 u32 ns;
11626
11627 if (dd->icode == ICODE_FPGA_EMULATION)
11628 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11629 else /* simulation pretends to be ASIC */
11630 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11631 if (cclocks && !ns)
11632 ns = 1;
11633 return ns;
11634}
11635
11636/*
11637 * Dynamically adjust the receive interrupt timeout for a context based on
11638 * incoming packet rate.
11639 *
11640 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11641 */
11642static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11643{
11644 struct hfi1_devdata *dd = rcd->dd;
11645 u32 timeout = rcd->rcvavail_timeout;
11646
11647 /*
11648 * This algorithm doubles or halves the timeout depending on whether
 11649 * the number of packets received in this interrupt was less than or
 11650 * greater than or equal to the interrupt count.
11651 *
11652 * The calculations below do not allow a steady state to be achieved.
 11653 * Only at the endpoints is it possible to have an unchanging
11654 * timeout.
11655 */
11656 if (npkts < rcv_intr_count) {
11657 /*
11658 * Not enough packets arrived before the timeout, adjust
11659 * timeout downward.
11660 */
11661 if (timeout < 2) /* already at minimum? */
11662 return;
11663 timeout >>= 1;
11664 } else {
11665 /*
11666 * More than enough packets arrived before the timeout, adjust
11667 * timeout upward.
11668 */
11669 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11670 return;
11671 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11672 }
11673
11674 rcd->rcvavail_timeout = timeout;
Jubin John4d114fd2016-02-14 20:21:43 -080011675 /*
11676 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11677 * been verified to be in range
11678 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011679 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011680 (u64)timeout <<
11681 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011682}
11683
11684void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11685 u32 intr_adjust, u32 npkts)
11686{
11687 struct hfi1_devdata *dd = rcd->dd;
11688 u64 reg;
11689 u32 ctxt = rcd->ctxt;
11690
11691 /*
11692 * Need to write timeout register before updating RcvHdrHead to ensure
11693 * that a new value is used when the HW decides to restart counting.
11694 */
11695 if (intr_adjust)
11696 adjust_rcv_timeout(rcd, npkts);
11697 if (updegr) {
11698 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11699 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11700 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11701 }
11702 mmiowb();
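	/* write RcvHdrHead: reload the interrupt counter and publish the new head */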
11703 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11704 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11705 << RCV_HDR_HEAD_HEAD_SHIFT);
11706 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11707 mmiowb();
11708}
11709
11710u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11711{
11712 u32 head, tail;
11713
11714 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11715 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11716
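	/* use the DMA'd tail copy when available, otherwise read the tail CSR */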
11717 if (rcd->rcvhdrtail_kvaddr)
11718 tail = get_rcvhdrtail(rcd);
11719 else
11720 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11721
11722 return head == tail;
11723}
11724
11725/*
11726 * Context Control and Receive Array encoding for buffer size:
11727 * 0x0 invalid
11728 * 0x1 4 KB
11729 * 0x2 8 KB
11730 * 0x3 16 KB
11731 * 0x4 32 KB
11732 * 0x5 64 KB
11733 * 0x6 128 KB
11734 * 0x7 256 KB
11735 * 0x8 512 KB (Receive Array only)
11736 * 0x9 1 MB (Receive Array only)
11737 * 0xa 2 MB (Receive Array only)
11738 *
11739 * 0xB-0xF - reserved (Receive Array only)
11740 *
11741 *
11742 * This routine assumes that the value has already been sanity checked.
11743 */
11744static u32 encoded_size(u32 size)
11745{
11746 switch (size) {
Jubin John8638b772016-02-14 20:19:24 -080011747 case 4 * 1024: return 0x1;
11748 case 8 * 1024: return 0x2;
11749 case 16 * 1024: return 0x3;
11750 case 32 * 1024: return 0x4;
11751 case 64 * 1024: return 0x5;
11752 case 128 * 1024: return 0x6;
11753 case 256 * 1024: return 0x7;
11754 case 512 * 1024: return 0x8;
11755 case 1 * 1024 * 1024: return 0x9;
11756 case 2 * 1024 * 1024: return 0xa;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011757 }
11758 return 0x1; /* if invalid, go with the minimum size */
11759}
11760
Michael J. Ruhl22505632017-07-24 07:46:06 -070011761void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11762 struct hfi1_ctxtdata *rcd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011763{
Mike Marciniszyn77241052015-07-30 15:17:43 -040011764 u64 rcvctrl, reg;
11765 int did_enable = 0;
Michael J. Ruhl22505632017-07-24 07:46:06 -070011766 u16 ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011767
Mike Marciniszyn77241052015-07-30 15:17:43 -040011768 if (!rcd)
11769 return;
11770
Michael J. Ruhl22505632017-07-24 07:46:06 -070011771 ctxt = rcd->ctxt;
11772
Mike Marciniszyn77241052015-07-30 15:17:43 -040011773 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11774
11775 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
 11776 /* if the context is already enabled, don't do the extra steps */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011777 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11778 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011779 /* reset the tail and hdr addresses, and sequence count */
11780 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011781 rcd->rcvhdrq_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011782 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11783 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011784 rcd->rcvhdrqtailaddr_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011785 rcd->seq_cnt = 1;
11786
11787 /* reset the cached receive header queue head value */
11788 rcd->head = 0;
11789
11790 /*
11791 * Zero the receive header queue so we don't get false
11792 * positives when checking the sequence number. The
11793 * sequence numbers could land exactly on the same spot.
11794 * E.g. a rcd restart before the receive header wrapped.
11795 */
11796 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11797
11798 /* starting timeout */
11799 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11800
11801 /* enable the context */
11802 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11803
11804 /* clean the egr buffer size first */
11805 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11806 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11807 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11808 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11809
11810 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11811 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11812 did_enable = 1;
11813
11814 /* zero RcvEgrIndexHead */
11815 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11816
11817 /* set eager count and base index */
11818 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11819 & RCV_EGR_CTRL_EGR_CNT_MASK)
11820 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11821 (((rcd->eager_base >> RCV_SHIFT)
11822 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11823 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11824 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11825
11826 /*
11827 * Set TID (expected) count and base index.
11828 * rcd->expected_count is set to individual RcvArray entries,
11829 * not pairs, and the CSR takes a pair-count in groups of
11830 * four, so divide by 8.
11831 */
11832 reg = (((rcd->expected_count >> RCV_SHIFT)
11833 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11834 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11835 (((rcd->expected_base >> RCV_SHIFT)
11836 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11837 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11838 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011839 if (ctxt == HFI1_CTRL_CTXT)
11840 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011841 }
11842 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11843 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011844 /*
 11845 * When the receive context is being disabled, turn on tail
 11846 * update with a dummy tail address and then disable the
 11847 * receive context.
11848 */
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011849 if (dd->rcvhdrtail_dummy_dma) {
Mark F. Brown46b010d2015-11-09 19:18:20 -050011850 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011851 dd->rcvhdrtail_dummy_dma);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011852 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011853 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11854 }
11855
Mike Marciniszyn77241052015-07-30 15:17:43 -040011856 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11857 }
11858 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11859 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11860 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11861 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011862 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011863 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011864 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11865 /* See comment on RcvCtxtCtrl.TailUpd above */
11866 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11867 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11868 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011869 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11870 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11871 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11872 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11873 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011874 /*
11875 * In one-packet-per-eager mode, the size comes from
11876 * the RcvArray entry.
11877 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011878 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11879 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11880 }
11881 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11882 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11883 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11884 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11885 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11886 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11887 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11888 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11889 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11890 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11891 rcd->rcvctrl = rcvctrl;
11892 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11893 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11894
11895 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011896 if (did_enable &&
11897 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011898 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11899 if (reg != 0) {
11900 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011901 ctxt, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011902 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11903 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11904 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11905 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11906 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11907 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011908 ctxt, reg, reg == 0 ? "not" : "still");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011909 }
11910 }
11911
11912 if (did_enable) {
11913 /*
11914 * The interrupt timeout and count must be set after
11915 * the context is enabled to take effect.
11916 */
11917 /* set interrupt timeout */
11918 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011919 (u64)rcd->rcvavail_timeout <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040011920 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11921
11922 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11923 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11924 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11925 }
11926
11927 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11928 /*
11929 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011930 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11931 * so it doesn't contain an invalid address.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011932 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011933 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011934 dd->rcvhdrtail_dummy_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011935}
11936
Dean Luick582e05c2016-02-18 11:13:01 -080011937u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011938{
11939 int ret;
11940 u64 val = 0;
11941
11942 if (namep) {
11943 ret = dd->cntrnameslen;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011944 *namep = dd->cntrnames;
11945 } else {
11946 const struct cntr_entry *entry;
11947 int i, j;
11948
11949 ret = (dd->ndevcntrs) * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011950
11951 /* Get the start of the block of counters */
11952 *cntrp = dd->cntrs;
11953
11954 /*
11955 * Now go and fill in each counter in the block.
11956 */
11957 for (i = 0; i < DEV_CNTR_LAST; i++) {
11958 entry = &dev_cntrs[i];
11959 hfi1_cdbg(CNTR, "reading %s", entry->name);
11960 if (entry->flags & CNTR_DISABLED) {
11961 /* Nothing */
11962 hfi1_cdbg(CNTR, "\tDisabled\n");
11963 } else {
11964 if (entry->flags & CNTR_VL) {
11965 hfi1_cdbg(CNTR, "\tPer VL\n");
11966 for (j = 0; j < C_VL_COUNT; j++) {
11967 val = entry->rw_cntr(entry,
11968 dd, j,
11969 CNTR_MODE_R,
11970 0);
11971 hfi1_cdbg(
11972 CNTR,
11973 "\t\tRead 0x%llx for %d\n",
11974 val, j);
11975 dd->cntrs[entry->offset + j] =
11976 val;
11977 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011978 } else if (entry->flags & CNTR_SDMA) {
11979 hfi1_cdbg(CNTR,
11980 "\t Per SDMA Engine\n");
11981 for (j = 0; j < dd->chip_sdma_engines;
11982 j++) {
11983 val =
11984 entry->rw_cntr(entry, dd, j,
11985 CNTR_MODE_R, 0);
11986 hfi1_cdbg(CNTR,
11987 "\t\tRead 0x%llx for %d\n",
11988 val, j);
11989 dd->cntrs[entry->offset + j] =
11990 val;
11991 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011992 } else {
11993 val = entry->rw_cntr(entry, dd,
11994 CNTR_INVALID_VL,
11995 CNTR_MODE_R, 0);
11996 dd->cntrs[entry->offset] = val;
11997 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11998 }
11999 }
12000 }
12001 }
12002 return ret;
12003}
12004
12005/*
12006 * Used by sysfs to create files for hfi stats to read
12007 */
Dean Luick582e05c2016-02-18 11:13:01 -080012008u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012009{
12010 int ret;
12011 u64 val = 0;
12012
12013 if (namep) {
Dean Luick582e05c2016-02-18 11:13:01 -080012014 ret = ppd->dd->portcntrnameslen;
12015 *namep = ppd->dd->portcntrnames;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012016 } else {
12017 const struct cntr_entry *entry;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012018 int i, j;
12019
Dean Luick582e05c2016-02-18 11:13:01 -080012020 ret = ppd->dd->nportcntrs * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012021 *cntrp = ppd->cntrs;
12022
12023 for (i = 0; i < PORT_CNTR_LAST; i++) {
12024 entry = &port_cntrs[i];
12025 hfi1_cdbg(CNTR, "reading %s", entry->name);
12026 if (entry->flags & CNTR_DISABLED) {
12027 /* Nothing */
12028 hfi1_cdbg(CNTR, "\tDisabled\n");
12029 continue;
12030 }
12031
12032 if (entry->flags & CNTR_VL) {
12033 hfi1_cdbg(CNTR, "\tPer VL");
12034 for (j = 0; j < C_VL_COUNT; j++) {
12035 val = entry->rw_cntr(entry, ppd, j,
12036 CNTR_MODE_R,
12037 0);
12038 hfi1_cdbg(
12039 CNTR,
12040 "\t\tRead 0x%llx for %d",
12041 val, j);
12042 ppd->cntrs[entry->offset + j] = val;
12043 }
12044 } else {
12045 val = entry->rw_cntr(entry, ppd,
12046 CNTR_INVALID_VL,
12047 CNTR_MODE_R,
12048 0);
12049 ppd->cntrs[entry->offset] = val;
12050 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12051 }
12052 }
12053 }
12054 return ret;
12055}
12056
12057static void free_cntrs(struct hfi1_devdata *dd)
12058{
12059 struct hfi1_pportdata *ppd;
12060 int i;
12061
12062 if (dd->synth_stats_timer.data)
12063 del_timer_sync(&dd->synth_stats_timer);
12064 dd->synth_stats_timer.data = 0;
12065 ppd = (struct hfi1_pportdata *)(dd + 1);
12066 for (i = 0; i < dd->num_pports; i++, ppd++) {
12067 kfree(ppd->cntrs);
12068 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080012069 free_percpu(ppd->ibport_data.rvp.rc_acks);
12070 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12071 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012072 ppd->cntrs = NULL;
12073 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080012074 ppd->ibport_data.rvp.rc_acks = NULL;
12075 ppd->ibport_data.rvp.rc_qacks = NULL;
12076 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012077 }
12078 kfree(dd->portcntrnames);
12079 dd->portcntrnames = NULL;
12080 kfree(dd->cntrs);
12081 dd->cntrs = NULL;
12082 kfree(dd->scntrs);
12083 dd->scntrs = NULL;
12084 kfree(dd->cntrnames);
12085 dd->cntrnames = NULL;
Tadeusz Struk22546b72017-04-28 10:40:02 -070012086 if (dd->update_cntr_wq) {
12087 destroy_workqueue(dd->update_cntr_wq);
12088 dd->update_cntr_wq = NULL;
12089 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012090}
12091
Mike Marciniszyn77241052015-07-30 15:17:43 -040012092static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12093 u64 *psval, void *context, int vl)
12094{
12095 u64 val;
12096 u64 sval = *psval;
12097
12098 if (entry->flags & CNTR_DISABLED) {
12099 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12100 return 0;
12101 }
12102
12103 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12104
12105 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12106
12107 /* If it's a synthetic counter, there is more work we need to do */
12108 if (entry->flags & CNTR_SYNTH) {
12109 if (sval == CNTR_MAX) {
12110 /* No need to read already saturated */
12111 return CNTR_MAX;
12112 }
12113
12114 if (entry->flags & CNTR_32BIT) {
12115 /* 32bit counters can wrap multiple times */
12116 u64 upper = sval >> 32;
12117 u64 lower = (sval << 32) >> 32;
12118
12119 if (lower > val) { /* hw wrapped */
12120 if (upper == CNTR_32BIT_MAX)
12121 val = CNTR_MAX;
12122 else
12123 upper++;
12124 }
12125
12126 if (val != CNTR_MAX)
12127 val = (upper << 32) | val;
12128
12129 } else {
12130 /* If we rolled we are saturated */
12131 if ((val < sval) || (val > CNTR_MAX))
12132 val = CNTR_MAX;
12133 }
12134 }
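/*
 * Worked example (illustrative, not from the original source): with a
 * saved synthetic value sval = 0x100000010 (upper = 0x1, lower = 0x10)
 * and a new 32-bit hardware reading val = 0x5, lower > val indicates a
 * wrap, so upper becomes 0x2 and the reconstructed 64-bit value is
 * (0x2 << 32) | 0x5 = 0x200000005.
 */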
12135
12136 *psval = val;
12137
12138 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12139
12140 return val;
12141}
12142
12143static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12144 struct cntr_entry *entry,
12145 u64 *psval, void *context, int vl, u64 data)
12146{
12147 u64 val;
12148
12149 if (entry->flags & CNTR_DISABLED) {
12150 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12151 return 0;
12152 }
12153
12154 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12155
12156 if (entry->flags & CNTR_SYNTH) {
12157 *psval = data;
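/*
 * Note (illustrative, not from the original source): for 32-bit
 * counters only the low 32 bits of 'data' reach the hardware; the
 * (data << 32) >> 32 below masks off the upper half, while the full
 * 64-bit value is preserved in the synthetic copy and returned.
 */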
12158 if (entry->flags & CNTR_32BIT) {
12159 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12160 (data << 32) >> 32);
12161 val = data; /* return the full 64bit value */
12162 } else {
12163 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12164 data);
12165 }
12166 } else {
12167 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12168 }
12169
12170 *psval = val;
12171
12172 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12173
12174 return val;
12175}
12176
12177u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12178{
12179 struct cntr_entry *entry;
12180 u64 *sval;
12181
12182 entry = &dev_cntrs[index];
12183 sval = dd->scntrs + entry->offset;
12184
12185 if (vl != CNTR_INVALID_VL)
12186 sval += vl;
12187
12188 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12189}
12190
12191u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12192{
12193 struct cntr_entry *entry;
12194 u64 *sval;
12195
12196 entry = &dev_cntrs[index];
12197 sval = dd->scntrs + entry->offset;
12198
12199 if (vl != CNTR_INVALID_VL)
12200 sval += vl;
12201
12202 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12203}
12204
12205u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12206{
12207 struct cntr_entry *entry;
12208 u64 *sval;
12209
12210 entry = &port_cntrs[index];
12211 sval = ppd->scntrs + entry->offset;
12212
12213 if (vl != CNTR_INVALID_VL)
12214 sval += vl;
12215
12216 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12217 (index <= C_RCV_HDR_OVF_LAST)) {
12218 /* We do not want to bother for disabled contexts */
12219 return 0;
12220 }
12221
12222 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12223}
12224
12225u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12226{
12227 struct cntr_entry *entry;
12228 u64 *sval;
12229
12230 entry = &port_cntrs[index];
12231 sval = ppd->scntrs + entry->offset;
12232
12233 if (vl != CNTR_INVALID_VL)
12234 sval += vl;
12235
12236 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12237 (index <= C_RCV_HDR_OVF_LAST)) {
12238 /* We do not want to bother for disabled contexts */
12239 return 0;
12240 }
12241
12242 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12243}
12244
Tadeusz Struk22546b72017-04-28 10:40:02 -070012245static void do_update_synth_timer(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012246{
12247 u64 cur_tx;
12248 u64 cur_rx;
12249 u64 total_flits;
12250 u8 update = 0;
12251 int i, j, vl;
12252 struct hfi1_pportdata *ppd;
12253 struct cntr_entry *entry;
Tadeusz Struk22546b72017-04-28 10:40:02 -070012254 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12255 update_cntr_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012256
12257 /*
12258 * Rather than keep beating on the CSRs, pick a minimal set that we can
12259 * check to watch for potential rollover. We can do this by looking at
12260 * the number of flits sent/received. If the total flits exceed 32 bits,
12261 * then we have to iterate over all the counters and update them.
12262 */
12263 entry = &dev_cntrs[C_DC_RCV_FLITS];
12264 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12265
12266 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12267 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12268
12269 hfi1_cdbg(
12270 CNTR,
12271 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12272 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12273
12274 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12275 /*
12276 * May not be strictly necessary to update but it won't hurt and
12277 * simplifies the logic here.
12278 */
12279 update = 1;
12280 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12281 dd->unit);
12282 } else {
12283 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12284 hfi1_cdbg(CNTR,
12285 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12286 total_flits, (u64)CNTR_32BIT_MAX);
12287 if (total_flits >= CNTR_32BIT_MAX) {
12288 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12289 dd->unit);
12290 update = 1;
12291 }
12292 }
12293
12294 if (update) {
12295 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12296 for (i = 0; i < DEV_CNTR_LAST; i++) {
12297 entry = &dev_cntrs[i];
12298 if (entry->flags & CNTR_VL) {
12299 for (vl = 0; vl < C_VL_COUNT; vl++)
12300 read_dev_cntr(dd, i, vl);
12301 } else {
12302 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12303 }
12304 }
12305 ppd = (struct hfi1_pportdata *)(dd + 1);
12306 for (i = 0; i < dd->num_pports; i++, ppd++) {
12307 for (j = 0; j < PORT_CNTR_LAST; j++) {
12308 entry = &port_cntrs[j];
12309 if (entry->flags & CNTR_VL) {
12310 for (vl = 0; vl < C_VL_COUNT; vl++)
12311 read_port_cntr(ppd, j, vl);
12312 } else {
12313 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12314 }
12315 }
12316 }
12317
12318 /*
12319 * We want the value in the register. The goal is to keep track
12320 * of the number of "ticks", not the counter value. In other
12321 * words, if the register rolls over we want to notice it and
12322 * force an update.
12323 */
12324 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12325 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12326 CNTR_MODE_R, 0);
12327
12328 entry = &dev_cntrs[C_DC_RCV_FLITS];
12329 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12330 CNTR_MODE_R, 0);
12331
12332 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12333 dd->unit, dd->last_tx, dd->last_rx);
12334
12335 } else {
12336 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12337 }
Tadeusz Struk22546b72017-04-28 10:40:02 -070012338}
Mike Marciniszyn77241052015-07-30 15:17:43 -040012339
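/*
 * Note (illustrative, not from the original source): the timer callback
 * below only queues update_cntr_work and re-arms itself; the heavy CSR
 * reads in do_update_synth_timer() then run from the ordered workqueue
 * in process context rather than in the timer's atomic context.
 */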
Tadeusz Struk22546b72017-04-28 10:40:02 -070012340static void update_synth_timer(unsigned long opaque)
12341{
12342 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12343
12344 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
Bart Van Assche48a0cc132016-06-03 12:09:56 -070012345 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012346}
12347
Jianxin Xiong09a79082016-10-25 13:12:40 -070012348#define C_MAX_NAME 16 /* 15 chars + one for '\0' */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012349static int init_cntrs(struct hfi1_devdata *dd)
12350{
Dean Luickc024c552016-01-11 18:30:57 -050012351 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012352 size_t sz;
12353 char *p;
12354 char name[C_MAX_NAME];
12355 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012356 const char *bit_type_32 = ",32";
12357 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012358
12359 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053012360 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12361 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012362
12363 /***********************/
12364 /* per device counters */
12365 /***********************/
12366
12367 /* size names and determine how many we have */
12368 dd->ndevcntrs = 0;
12369 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012370
12371 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012372 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12373 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12374 continue;
12375 }
12376
12377 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050012378 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012379 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012380 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012381 dev_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012382 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012383 /* Add ",32" for 32-bit counters */
12384 if (dev_cntrs[i].flags & CNTR_32BIT)
12385 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012386 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012387 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012388 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012389 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050012390 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012391 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012392 snprintf(name, C_MAX_NAME, "%s%d",
12393 dev_cntrs[i].name, j);
12394 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012395 /* Add ",32" for 32-bit counters */
12396 if (dev_cntrs[i].flags & CNTR_32BIT)
12397 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012398 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012399 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012400 }
12401 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012402 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012403 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012404 /* Add ",32" for 32-bit counters */
12405 if (dev_cntrs[i].flags & CNTR_32BIT)
12406 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050012407 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012408 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012409 }
12410 }
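/*
 * Note (illustrative, not from the original source): at this point
 * dd->ndevcntrs counts one value slot per VL or per SDMA engine for
 * the expanded counters, and each dev_cntrs[i].offset records where
 * that counter's block starts. The name strings are laid out in the
 * same order in the second pass below, so readers can pair names
 * with values by position.
 */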
12411
12412 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050012413 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012414 if (!dd->cntrs)
12415 goto bail;
12416
Dean Luickc024c552016-01-11 18:30:57 -050012417 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012418 if (!dd->scntrs)
12419 goto bail;
12420
Mike Marciniszyn77241052015-07-30 15:17:43 -040012421 /* allocate space for the counter names */
12422 dd->cntrnameslen = sz;
12423 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12424 if (!dd->cntrnames)
12425 goto bail;
12426
12427 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050012428 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012429 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12430 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012431 } else if (dev_cntrs[i].flags & CNTR_VL) {
12432 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012433 snprintf(name, C_MAX_NAME, "%s%d",
12434 dev_cntrs[i].name,
12435 vl_from_idx(j));
12436 memcpy(p, name, strlen(name));
12437 p += strlen(name);
12438
12439 /* Counter is 32 bits */
12440 if (dev_cntrs[i].flags & CNTR_32BIT) {
12441 memcpy(p, bit_type_32, bit_type_32_sz);
12442 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012443 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012444
Mike Marciniszyn77241052015-07-30 15:17:43 -040012445 *p++ = '\n';
12446 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012447 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12448 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012449 snprintf(name, C_MAX_NAME, "%s%d",
12450 dev_cntrs[i].name, j);
12451 memcpy(p, name, strlen(name));
12452 p += strlen(name);
12453
12454 /* Counter is 32 bits */
12455 if (dev_cntrs[i].flags & CNTR_32BIT) {
12456 memcpy(p, bit_type_32, bit_type_32_sz);
12457 p += bit_type_32_sz;
12458 }
12459
12460 *p++ = '\n';
12461 }
12462 } else {
12463 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12464 p += strlen(dev_cntrs[i].name);
12465
12466 /* Counter is 32 bits */
12467 if (dev_cntrs[i].flags & CNTR_32BIT) {
12468 memcpy(p, bit_type_32, bit_type_32_sz);
12469 p += bit_type_32_sz;
12470 }
12471
12472 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040012473 }
12474 }
12475
12476 /*********************/
12477 /* per port counters */
12478 /*********************/
12479
12480 /*
12481 * Go through the receive header overflow counters and disable the ones
12482 * we don't need. This varies based on platform, so we need to do it
12483 * dynamically here.
12484 */
12485 rcv_ctxts = dd->num_rcv_contexts;
12486 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12487 i <= C_RCV_HDR_OVF_LAST; i++) {
12488 port_cntrs[i].flags |= CNTR_DISABLED;
12489 }
12490
12491 /* size port counter names and determine how many we have */
12492 sz = 0;
12493 dd->nportcntrs = 0;
12494 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012495 if (port_cntrs[i].flags & CNTR_DISABLED) {
12496 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12497 continue;
12498 }
12499
12500 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012501 port_cntrs[i].offset = dd->nportcntrs;
12502 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012503 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012504 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012505 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012506 /* Add ",32" for 32-bit counters */
12507 if (port_cntrs[i].flags & CNTR_32BIT)
12508 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012509 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012510 dd->nportcntrs++;
12511 }
12512 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012513 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012514 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012515 /* Add ",32" for 32-bit counters */
12516 if (port_cntrs[i].flags & CNTR_32BIT)
12517 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012518 port_cntrs[i].offset = dd->nportcntrs;
12519 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012520 }
12521 }
12522
12523 /* allocate space for the counter names */
12524 dd->portcntrnameslen = sz;
12525 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12526 if (!dd->portcntrnames)
12527 goto bail;
12528
12529 /* fill in port cntr names */
12530 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12531 if (port_cntrs[i].flags & CNTR_DISABLED)
12532 continue;
12533
12534 if (port_cntrs[i].flags & CNTR_VL) {
12535 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012536 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012537 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012538 memcpy(p, name, strlen(name));
12539 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012540
12541 /* Counter is 32 bits */
12542 if (port_cntrs[i].flags & CNTR_32BIT) {
12543 memcpy(p, bit_type_32, bit_type_32_sz);
12544 p += bit_type_32_sz;
12545 }
12546
Mike Marciniszyn77241052015-07-30 15:17:43 -040012547 *p++ = '\n';
12548 }
12549 } else {
12550 memcpy(p, port_cntrs[i].name,
12551 strlen(port_cntrs[i].name));
12552 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012553
12554 /* Counter is 32 bits */
12555 if (port_cntrs[i].flags & CNTR_32BIT) {
12556 memcpy(p, bit_type_32, bit_type_32_sz);
12557 p += bit_type_32_sz;
12558 }
12559
Mike Marciniszyn77241052015-07-30 15:17:43 -040012560 *p++ = '\n';
12561 }
12562 }
12563
12564 /* allocate per port storage for counter values */
12565 ppd = (struct hfi1_pportdata *)(dd + 1);
12566 for (i = 0; i < dd->num_pports; i++, ppd++) {
12567 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12568 if (!ppd->cntrs)
12569 goto bail;
12570
12571 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12572 if (!ppd->scntrs)
12573 goto bail;
12574 }
12575
12576 /* CPU counters need to be allocated and zeroed */
12577 if (init_cpu_counters(dd))
12578 goto bail;
12579
Tadeusz Struk22546b72017-04-28 10:40:02 -070012580 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12581 WQ_MEM_RECLAIM, dd->unit);
12582 if (!dd->update_cntr_wq)
12583 goto bail;
12584
12585 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12586
Mike Marciniszyn77241052015-07-30 15:17:43 -040012587 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12588 return 0;
12589bail:
12590 free_cntrs(dd);
12591 return -ENOMEM;
12592}
12593
Mike Marciniszyn77241052015-07-30 15:17:43 -040012594static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12595{
12596 switch (chip_lstate) {
12597 default:
12598 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012599 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12600 chip_lstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012601 /* fall through */
12602 case LSTATE_DOWN:
12603 return IB_PORT_DOWN;
12604 case LSTATE_INIT:
12605 return IB_PORT_INIT;
12606 case LSTATE_ARMED:
12607 return IB_PORT_ARMED;
12608 case LSTATE_ACTIVE:
12609 return IB_PORT_ACTIVE;
12610 }
12611}
12612
12613u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12614{
12615 /* look at the HFI meta-states only */
12616 switch (chip_pstate & 0xf0) {
12617 default:
12618 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012619 chip_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012620 /* fall through */
12621 case PLS_DISABLED:
12622 return IB_PORTPHYSSTATE_DISABLED;
12623 case PLS_OFFLINE:
12624 return OPA_PORTPHYSSTATE_OFFLINE;
12625 case PLS_POLLING:
12626 return IB_PORTPHYSSTATE_POLLING;
12627 case PLS_CONFIGPHY:
12628 return IB_PORTPHYSSTATE_TRAINING;
12629 case PLS_LINKUP:
12630 return IB_PORTPHYSSTATE_LINKUP;
12631 case PLS_PHYTEST:
12632 return IB_PORTPHYSSTATE_PHY_TEST;
12633 }
12634}
12635
12636/* return the OPA port logical state name */
12637const char *opa_lstate_name(u32 lstate)
12638{
12639 static const char * const port_logical_names[] = {
12640 "PORT_NOP",
12641 "PORT_DOWN",
12642 "PORT_INIT",
12643 "PORT_ARMED",
12644 "PORT_ACTIVE",
12645 "PORT_ACTIVE_DEFER",
12646 };
12647 if (lstate < ARRAY_SIZE(port_logical_names))
12648 return port_logical_names[lstate];
12649 return "unknown";
12650}
12651
12652/* return the OPA port physical state name */
12653const char *opa_pstate_name(u32 pstate)
12654{
12655 static const char * const port_physical_names[] = {
12656 "PHYS_NOP",
12657 "reserved1",
12658 "PHYS_POLL",
12659 "PHYS_DISABLED",
12660 "PHYS_TRAINING",
12661 "PHYS_LINKUP",
12662 "PHYS_LINK_ERR_RECOVER",
12663 "PHYS_PHY_TEST",
12664 "reserved8",
12665 "PHYS_OFFLINE",
12666 "PHYS_GANGED",
12667 "PHYS_TEST",
12668 };
12669 if (pstate < ARRAY_SIZE(port_physical_names))
12670 return port_physical_names[pstate];
12671 return "unknown";
12672}
12673
Byczkowski, Jakub02a222c2017-08-04 13:52:26 -070012674static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012675{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012676 /*
12677 * Set port status flags in the page mapped into userspace
12678 * memory. Do it here to ensure a reliable state - this is
12679 * the only function called by all state handling code.
12680 * Always set the flags because the cached value might have been
12681 * changed explicitly outside of this function.
12683 */
12684 if (ppd->statusp) {
Byczkowski, Jakub02a222c2017-08-04 13:52:26 -070012685 switch (state) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012686 case IB_PORT_DOWN:
12687 case IB_PORT_INIT:
12688 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12689 HFI1_STATUS_IB_READY);
12690 break;
12691 case IB_PORT_ARMED:
12692 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12693 break;
12694 case IB_PORT_ACTIVE:
12695 *ppd->statusp |= HFI1_STATUS_IB_READY;
12696 break;
12697 }
12698 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012699}
12700
Byczkowski, Jakub02a222c2017-08-04 13:52:26 -070012701/*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012702 * wait_logical_linkstate - wait for an IB link state change to occur
12703 * @ppd: port device
12704 * @state: the state to wait for
12705 * @msecs: the number of milliseconds to wait
12706 *
12707 * Wait up to msecs milliseconds for an IB link state change to occur.
12708 * For now, take the easy polling route.
12709 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12710 */
12711static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12712 int msecs)
12713{
12714 unsigned long timeout;
Byczkowski, Jakub02a222c2017-08-04 13:52:26 -070012715 u32 new_state;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012716
12717 timeout = jiffies + msecs_to_jiffies(msecs);
12718 while (1) {
Byczkowski, Jakub02a222c2017-08-04 13:52:26 -070012719 new_state = chip_to_opa_lstate(ppd->dd,
12720 read_logical_state(ppd->dd));
12721 if (new_state == state)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012722 break;
Byczkowski, Jakub02a222c2017-08-04 13:52:26 -070012723 if (time_after(jiffies, timeout)) {
12724 dd_dev_err(ppd->dd,
12725 "timeout waiting for link state 0x%x\n",
12726 state);
12727 return -ETIMEDOUT;
12728 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012729 msleep(20);
12730 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012731
Byczkowski, Jakub02a222c2017-08-04 13:52:26 -070012732 update_statusp(ppd, state);
12733 dd_dev_info(ppd->dd,
12734 "logical state changed to %s (0x%x)\n",
12735 opa_lstate_name(state),
12736 state);
12737 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012738}
12739
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012740/*
12741 * Read the physical hardware link state and set the driver's cached value
12742 * of it.
12743 */
12744void cache_physical_state(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012745{
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012746 u32 read_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012747 u32 ib_pstate;
12748
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012749 read_pstate = read_physical_state(ppd->dd);
12750 ib_pstate = chip_to_opa_pstate(ppd->dd, read_pstate);
12751 /* check if OPA pstate changed */
12752 if (chip_to_opa_pstate(ppd->dd, ppd->pstate) != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012753 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012754 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12755 __func__, opa_pstate_name(ib_pstate), ib_pstate,
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012756 read_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012757 }
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012758 ppd->pstate = read_pstate;
12759}
12760
12761/*
12762 * wait_physical_linkstate - wait for a physical link state change to occur
12763 * @ppd: port device
12764 * @state: the state to wait for
12765 * @msecs: the number of milliseconds to wait
12766 *
12767 * Wait up to msecs milliseconds for a physical link state change to occur.
12768 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12769 */
12770static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12771 int msecs)
12772{
12773 unsigned long timeout;
12774
12775 timeout = jiffies + msecs_to_jiffies(msecs);
12776 while (1) {
12777 cache_physical_state(ppd);
12778 if (ppd->pstate == state)
12779 break;
12780 if (time_after(jiffies, timeout)) {
12781 dd_dev_err(ppd->dd,
12782 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
12783 state, ppd->pstate);
12784 return -ETIMEDOUT;
12785 }
12786 usleep_range(1950, 2050); /* sleep 2ms-ish */
12787 }
12788
12789 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012790}
12791
Mike Marciniszyn77241052015-07-30 15:17:43 -040012792#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12793(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12794
12795#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12796(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12797
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -070012798void hfi1_init_ctxt(struct send_context *sc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012799{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012800 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012801 struct hfi1_devdata *dd = sc->dd;
12802 u64 reg;
12803 u8 set = (sc->type == SC_USER ?
12804 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12805 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12806 reg = read_kctxt_csr(dd, sc->hw_context,
12807 SEND_CTXT_CHECK_ENABLE);
12808 if (set)
12809 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12810 else
12811 SET_STATIC_RATE_CONTROL_SMASK(reg);
12812 write_kctxt_csr(dd, sc->hw_context,
12813 SEND_CTXT_CHECK_ENABLE, reg);
12814 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012815}
12816
12817int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12818{
12819 int ret = 0;
12820 u64 reg;
12821
12822 if (dd->icode != ICODE_RTL_SILICON) {
12823 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12824 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12825 __func__);
12826 return -EINVAL;
12827 }
12828 reg = read_csr(dd, ASIC_STS_THERM);
12829 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12830 ASIC_STS_THERM_CURR_TEMP_MASK);
12831 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12832 ASIC_STS_THERM_LO_TEMP_MASK);
12833 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12834 ASIC_STS_THERM_HI_TEMP_MASK);
12835 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12836 ASIC_STS_THERM_CRIT_TEMP_MASK);
12837 /* triggers is a 3-bit value - 1 bit per trigger. */
12838 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12839
12840 return ret;
12841}
12842
12843/* ========================================================================= */
12844
12845/*
12846 * Enable/disable chip from delivering interrupts.
12847 */
12848void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12849{
12850 int i;
12851
12852 /*
12853 * In HFI, the mask needs to be 1 to allow interrupts.
12854 */
12855 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012856 /* enable all interrupts */
12857 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012858 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012859
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012860 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012861 } else {
12862 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012863 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012864 }
12865}
12866
12867/*
12868 * Clear all interrupt sources on the chip.
12869 */
12870static void clear_all_interrupts(struct hfi1_devdata *dd)
12871{
12872 int i;
12873
12874 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012875 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012876
12877 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12878 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12879 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12880 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12881 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12882 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12883 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12884 for (i = 0; i < dd->chip_send_contexts; i++)
12885 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12886 for (i = 0; i < dd->chip_sdma_engines; i++)
12887 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12888
12889 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12890 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12891 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12892}
12893
12894/* Move to pcie.c? */
12895static void disable_intx(struct pci_dev *pdev)
12896{
12897 pci_intx(pdev, 0);
12898}
12899
12900static void clean_up_interrupts(struct hfi1_devdata *dd)
12901{
12902 int i;
12903
12904 /* remove irqs - must happen before disabling/turning off */
12905 if (dd->num_msix_entries) {
12906 /* MSI-X */
12907 struct hfi1_msix_entry *me = dd->msix_entries;
12908
12909 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012910 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012911 continue;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012912 hfi1_put_irq_affinity(dd, me);
12913 free_irq(me->irq, me->arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012914 }
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012915
12916 /* clean structures */
12917 kfree(dd->msix_entries);
12918 dd->msix_entries = NULL;
12919 dd->num_msix_entries = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012920 } else {
12921 /* INTx */
12922 if (dd->requested_intx_irq) {
12923 free_irq(dd->pcidev->irq, dd);
12924 dd->requested_intx_irq = 0;
12925 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012926 disable_intx(dd->pcidev);
12927 }
12928
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012929 pci_free_irq_vectors(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012930}
12931
12932/*
12933 * Remap the interrupt source from the general handler to the given MSI-X
12934 * interrupt.
12935 */
12936static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12937{
12938 u64 reg;
12939 int m, n;
12940
12941 /* clear from the handled mask of the general interrupt */
12942 m = isrc / 64;
12943 n = isrc % 64;
Dennis Dalessandrobc54f672017-05-29 17:18:14 -070012944 if (likely(m < CCE_NUM_INT_CSRS)) {
12945 dd->gi_mask[m] &= ~((u64)1 << n);
12946 } else {
12947 dd_dev_err(dd, "remap interrupt err\n");
12948 return;
12949 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012950
12951 /* direct the chip source to the given MSI-X interrupt */
12952 m = isrc / 8;
12953 n = isrc % 8;
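	/*
	 * Each CCE_INT_MAP CSR holds eight 8-bit entries, so isrc / 8 picks
	 * the CSR and isrc % 8 picks the byte within it. Worked example
	 * (illustrative, not from the original source): isrc = 13 gives
	 * m = 1, n = 5, so byte 5 of CCE_INT_MAP + 8 is set to msix_intr.
	 */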
Jubin John8638b772016-02-14 20:19:24 -080012954 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12955 reg &= ~((u64)0xff << (8 * n));
12956 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12957 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012958}
12959
12960static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12961 int engine, int msix_intr)
12962{
12963 /*
12964 * SDMA engine interrupt sources grouped by type, rather than
12965 * engine. Per-engine interrupts are as follows:
12966 * SDMA
12967 * SDMAProgress
12968 * SDMAIdle
12969 */
Jubin John8638b772016-02-14 20:19:24 -080012970 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012971 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012972 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012973 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012974 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012975 msix_intr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012976}
12977
Mike Marciniszyn77241052015-07-30 15:17:43 -040012978static int request_intx_irq(struct hfi1_devdata *dd)
12979{
12980 int ret;
12981
Jubin John98050712015-11-16 21:59:27 -050012982 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12983 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012984 ret = request_irq(dd->pcidev->irq, general_interrupt,
Jubin John17fb4f22016-02-14 20:21:52 -080012985 IRQF_SHARED, dd->intx_name, dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012986 if (ret)
12987 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012988 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012989 else
12990 dd->requested_intx_irq = 1;
12991 return ret;
12992}
12993
12994static int request_msix_irqs(struct hfi1_devdata *dd)
12995{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012996 int first_general, last_general;
12997 int first_sdma, last_sdma;
12998 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012999 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013000
13001 /* calculate the ranges we are going to use */
13002 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080013003 last_general = first_general + 1;
13004 first_sdma = last_general;
13005 last_sdma = first_sdma + dd->num_sdma;
13006 first_rx = last_sdma;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013007 last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
13008
13009 /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
13010 dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
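	/*
	 * Layout example (illustrative, not from the original source): with
	 * num_sdma = 16 and n_krcv_queues = 8, vector 0 is the general
	 * interrupt, vectors 1-16 are the SDMA engines, vectors 17-24 are
	 * the kernel receive contexts, and dynamically allocated (VNIC)
	 * contexts start at vector 25.
	 */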
Mike Marciniszyn77241052015-07-30 15:17:43 -040013011
13012 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040013013 * Sanity check - the code expects all SDMA chip source
13014 * interrupts to be in the same CSR, starting at bit 0. Verify
13015 * that this is true by checking the bit location of the start.
13016 */
13017 BUILD_BUG_ON(IS_SDMA_START % 64);
13018
13019 for (i = 0; i < dd->num_msix_entries; i++) {
13020 struct hfi1_msix_entry *me = &dd->msix_entries[i];
13021 const char *err_info;
13022 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040013023 irq_handler_t thread = NULL;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013024 void *arg = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013025 int idx;
13026 struct hfi1_ctxtdata *rcd = NULL;
13027 struct sdma_engine *sde = NULL;
13028
13029 /* obtain the arguments to request_irq */
13030 if (first_general <= i && i < last_general) {
13031 idx = i - first_general;
13032 handler = general_interrupt;
13033 arg = dd;
13034 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050013035 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013036 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080013037 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013038 } else if (first_sdma <= i && i < last_sdma) {
13039 idx = i - first_sdma;
13040 sde = &dd->per_sdma[idx];
13041 handler = sdma_interrupt;
13042 arg = sde;
13043 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050013044 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013045 err_info = "sdma";
13046 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080013047 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013048 } else if (first_rx <= i && i < last_rx) {
13049 idx = i - first_rx;
Michael J. Ruhld295dbe2017-08-04 13:52:44 -070013050 rcd = hfi1_rcd_get_by_index(dd, idx);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013051 if (rcd) {
13052 /*
13053 * Set the interrupt register and mask for this
13054 * context's interrupt.
13055 */
13056 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13057 rcd->imask = ((u64)1) <<
13058 ((IS_RCVAVAIL_START + idx) % 64);
13059 handler = receive_context_interrupt;
13060 thread = receive_context_thread;
13061 arg = rcd;
13062 snprintf(me->name, sizeof(me->name),
13063 DRIVER_NAME "_%d kctxt%d",
13064 dd->unit, idx);
13065 err_info = "receive context";
13066 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
13067 me->type = IRQ_RCVCTXT;
13068 rcd->msix_intr = i;
Michael J. Ruhld295dbe2017-08-04 13:52:44 -070013069 hfi1_rcd_put(rcd);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013070 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013071 } else {
13072 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080013073 * ignore it
13074 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013075 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013076 "Unexpected extra MSI-X interrupt %d\n", i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013077 continue;
13078 }
13079 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080013080 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013081 continue;
13082 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080013083 me->name[sizeof(me->name) - 1] = 0;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013084 me->irq = pci_irq_vector(dd->pcidev, i);
13085 /*
13086 * On err return me->irq. Don't need to clear this
13087 * because 'arg' has not been set, and cleanup will
13088 * do the right thing.
13089 */
13090 if (me->irq < 0)
13091 return me->irq;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013092
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013093 ret = request_threaded_irq(me->irq, handler, thread, 0,
Jubin John17fb4f22016-02-14 20:21:52 -080013094 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013095 if (ret) {
13096 dd_dev_err(dd,
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013097 "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13098 err_info, me->irq, idx, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013099 return ret;
13100 }
13101 /*
13102 * assign arg after request_irq call, so it will be
13103 * cleaned up
13104 */
13105 me->arg = arg;
13106
Mitko Haralanov957558c2016-02-03 14:33:40 -080013107 ret = hfi1_get_irq_affinity(dd, me);
13108 if (ret)
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013109 dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013110 }
13111
Mike Marciniszyn77241052015-07-30 15:17:43 -040013112 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013113}
13114
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013115void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13116{
13117 int i;
13118
13119 if (!dd->num_msix_entries) {
13120 synchronize_irq(dd->pcidev->irq);
13121 return;
13122 }
13123
13124 for (i = 0; i < dd->vnic.num_ctxt; i++) {
13125 struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13126 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13127
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013128 synchronize_irq(me->irq);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013129 }
13130}
13131
13132void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13133{
13134 struct hfi1_devdata *dd = rcd->dd;
13135 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13136
13137 if (!me->arg) /* => no irq, no affinity */
13138 return;
13139
13140 hfi1_put_irq_affinity(dd, me);
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013141 free_irq(me->irq, me->arg);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013142
13143 me->arg = NULL;
13144}
13145
13146void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13147{
13148 struct hfi1_devdata *dd = rcd->dd;
13149 struct hfi1_msix_entry *me;
13150 int idx = rcd->ctxt;
13151 void *arg = rcd;
13152 int ret;
13153
13154 rcd->msix_intr = dd->vnic.msix_idx++;
13155 me = &dd->msix_entries[rcd->msix_intr];
13156
13157 /*
13158 * Set the interrupt register and mask for this
13159 * context's interrupt.
13160 */
13161 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13162 rcd->imask = ((u64)1) <<
13163 ((IS_RCVAVAIL_START + idx) % 64);
13164
13165 snprintf(me->name, sizeof(me->name),
13166 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13167 me->name[sizeof(me->name) - 1] = 0;
13168 me->type = IRQ_RCVCTXT;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013169 me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13170 if (me->irq < 0) {
13171 dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
13172 idx, me->irq);
13173 return;
13174 }
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013175 remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13176
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013177 ret = request_threaded_irq(me->irq, receive_context_interrupt,
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013178 receive_context_thread, 0, me->name, arg);
13179 if (ret) {
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013180 dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13181 me->irq, idx, ret);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013182 return;
13183 }
13184 /*
13185 * assign arg after request_irq call, so it will be
13186 * cleaned up
13187 */
13188 me->arg = arg;
13189
13190 ret = hfi1_get_irq_affinity(dd, me);
13191 if (ret) {
13192 dd_dev_err(dd,
13193 "unable to pin IRQ %d\n", ret);
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013194 free_irq(me->irq, me->arg);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013195 }
13196}
13197
Mike Marciniszyn77241052015-07-30 15:17:43 -040013198/*
13199 * Set the general handler to accept all interrupts, remap all
13200 * chip interrupts back to MSI-X 0.
13201 */
13202static void reset_interrupts(struct hfi1_devdata *dd)
13203{
13204 int i;
13205
13206 /* all interrupts handled by the general handler */
13207 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13208 dd->gi_mask[i] = ~(u64)0;
13209
13210 /* all chip interrupts map to MSI-X 0 */
13211 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013212 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013213}
13214
13215static int set_up_interrupts(struct hfi1_devdata *dd)
13216{
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013217 u32 total;
13218 int ret, request;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013219 int single_interrupt = 0; /* we expect to have all the interrupts */
13220
13221 /*
13222 * Interrupt count:
13223 * 1 general, "slow path" interrupt (includes the SDMA engines
13224 * slow source, SDMACleanupDone)
13225 * N interrupts - one per used SDMA engine
13226 * M interrupts - one per kernel receive context
13227 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013228 total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013229
Mike Marciniszyn77241052015-07-30 15:17:43 -040013230 /* ask for MSI-X interrupts */
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013231 request = request_msix(dd, total);
13232 if (request < 0) {
13233 ret = request;
13234 goto fail;
13235 } else if (request == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013236 /* using INTx */
13237 /* dd->num_msix_entries already zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013238 single_interrupt = 1;
13239 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013240 } else if (request < total) {
13241 /* using MSI-X, with reduced interrupts */
13242 dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
13243 total, request);
13244 ret = -EINVAL;
13245 goto fail;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013246 } else {
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013247 dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13248 GFP_KERNEL);
13249 if (!dd->msix_entries) {
13250 ret = -ENOMEM;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013251 goto fail;
13252 }
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013253 /* using MSI-X */
13254 dd->num_msix_entries = total;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013255 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13256 }
13257
13258 /* mask all interrupts */
13259 set_intr_state(dd, 0);
13260 /* clear all pending interrupts */
13261 clear_all_interrupts(dd);
13262
13263 /* reset general handler mask, chip MSI-X mappings */
13264 reset_interrupts(dd);
13265
13266 if (single_interrupt)
13267 ret = request_intx_irq(dd);
13268 else
13269 ret = request_msix_irqs(dd);
13270 if (ret)
13271 goto fail;
13272
13273 return 0;
13274
13275fail:
13276 clean_up_interrupts(dd);
13277 return ret;
13278}
13279
13280/*
13281 * Set up context values in dd. Sets:
13282 *
13283 * num_rcv_contexts - number of contexts being used
13284 * n_krcv_queues - number of kernel contexts
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013285 * first_dyn_alloc_ctxt - first dynamically allocated context
13286 * in array of contexts
Mike Marciniszyn77241052015-07-30 15:17:43 -040013287 * freectxts - number of free user contexts
13288 * num_send_contexts - number of PIO send contexts being used
13289 */
13290static int set_up_context_variables(struct hfi1_devdata *dd)
13291{
Harish Chegondi429b6a72016-08-31 07:24:40 -070013292 unsigned long num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013293 int total_contexts;
13294 int ret;
13295 unsigned ngroups;
Dean Luick8f000f72016-04-12 11:32:06 -070013296 int qos_rmt_count;
13297 int user_rmt_reduced;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013298
13299 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070013300 * Kernel receive contexts:
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013301 * - Context 0 - control context (VL15/multicast/error)
Dean Luick33a9eb52016-04-12 10:50:22 -070013302 * - Context 1 - first kernel context
13303 * - Context 2 - second kernel context
13304 * ...
Mike Marciniszyn77241052015-07-30 15:17:43 -040013305 */
13306 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013307 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070013308 * n_krcvqs is the sum of module parameter kernel receive
13309 * contexts, krcvqs[]. It does not include the control
13310 * context, so add that.
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013311 */
Dean Luick33a9eb52016-04-12 10:50:22 -070013312 num_kernel_contexts = n_krcvqs + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013313 else
Harish Chegondi8784ac02016-07-25 13:38:50 -070013314 num_kernel_contexts = DEFAULT_KRCVQS + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013315 /*
13316 * Every kernel receive context needs an ACK send context.
13317 * one send context is allocated for each VL{0-7} and VL15
13318 */
13319 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13320 dd_dev_err(dd,
Harish Chegondi429b6a72016-08-31 07:24:40 -070013321 "Reducing # kernel rcv contexts to: %d, from %lu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013322 (int)(dd->chip_send_contexts - num_vls - 1),
Harish Chegondi429b6a72016-08-31 07:24:40 -070013323 num_kernel_contexts);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013324 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13325 }
13326 /*
Jubin John0852d242016-04-12 11:30:08 -070013327 * User contexts:
13328 * - default to 1 user context per real (non-HT) CPU core if
13329 * num_user_contexts is negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040013330 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050013331 if (num_user_contexts < 0)
Jubin John0852d242016-04-12 11:30:08 -070013332 num_user_contexts =
Dennis Dalessandro41973442016-07-25 07:52:36 -070013333 cpumask_weight(&node_affinity.real_cpu_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013334
13335 total_contexts = num_kernel_contexts + num_user_contexts;
13336
13337 /*
13338 * Adjust the counts given a global max.
13339 */
13340 if (total_contexts > dd->chip_rcv_contexts) {
13341 dd_dev_err(dd,
13342 "Reducing # user receive contexts to: %d, from %d\n",
13343 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
13344 (int)num_user_contexts);
13345 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
13346 /* recalculate */
13347 total_contexts = num_kernel_contexts + num_user_contexts;
13348 }
13349
Dean Luick8f000f72016-04-12 11:32:06 -070013350 /* each user context requires an entry in the RMT */
13351 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13352 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13353 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13354 dd_dev_err(dd,
13355 "RMT size is reducing the number of user receive contexts from %d to %d\n",
13356 (int)num_user_contexts,
13357 user_rmt_reduced);
13358 /* recalculate */
13359 num_user_contexts = user_rmt_reduced;
13360 total_contexts = num_kernel_contexts + num_user_contexts;
13361 }
13362
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013363 /* Accommodate VNIC contexts */
13364 if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
13365 total_contexts += HFI1_NUM_VNIC_CTXT;
13366
13367 /* the first N are kernel contexts, the rest are user/vnic contexts */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013368 dd->num_rcv_contexts = total_contexts;
13369 dd->n_krcv_queues = num_kernel_contexts;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013370 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080013371 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013372 dd->freectxts = num_user_contexts;
13373 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013374 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
13375 (int)dd->chip_rcv_contexts,
13376 (int)dd->num_rcv_contexts,
13377 (int)dd->n_krcv_queues,
13378 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013379
13380 /*
13381 * Receive array allocation:
13382 * All RcvArray entries are divided into groups of 8. This
13383 * is required by the hardware and will speed up writes to
13384 * consecutive entries by using write-combining of the entire
13385 * cacheline.
13386 *
13387 * The groups are evenly divided among all contexts. Any
13388 * leftover groups will be given to the first N user
13389 * contexts.
13390 */
13391 dd->rcv_entries.group_size = RCV_INCREMENT;
13392 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13393 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13394 dd->rcv_entries.nctxt_extra = ngroups -
13395 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
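 /*
 * Worked example (purely illustrative values): if the chip reported
 * 2048 RcvArray entries, group_size 8 gives 256 groups; with 40
 * receive contexts each context gets 256 / 40 = 6 groups and
 * nctxt_extra = 256 - (40 * 6) = 16, handed out to the first 16
 * user contexts.
 */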
13396 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13397 dd->rcv_entries.ngroups,
13398 dd->rcv_entries.nctxt_extra);
13399 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13400 MAX_EAGER_ENTRIES * 2) {
13401 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13402 dd->rcv_entries.group_size;
13403 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013404 "RcvArray group count too high, change to %u\n",
13405 dd->rcv_entries.ngroups);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013406 dd->rcv_entries.nctxt_extra = 0;
13407 }
13408 /*
13409 * PIO send contexts
13410 */
13411 ret = init_sc_pools_and_sizes(dd);
13412 if (ret >= 0) { /* success */
13413 dd->num_send_contexts = ret;
13414 dd_dev_info(
13415 dd,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013416 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013417 dd->chip_send_contexts,
13418 dd->num_send_contexts,
13419 dd->sc_sizes[SC_KERNEL].count,
13420 dd->sc_sizes[SC_ACK].count,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013421 dd->sc_sizes[SC_USER].count,
13422 dd->sc_sizes[SC_VL15].count);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013423 ret = 0; /* success */
13424 }
13425
13426 return ret;
13427}
13428
13429/*
13430 * Set the device/port partition key table. The MAD code
13431 * will ensure that, at least, the partial management
13432 * partition key is present in the table.
13433 */
13434static void set_partition_keys(struct hfi1_pportdata *ppd)
13435{
13436 struct hfi1_devdata *dd = ppd->dd;
13437 u64 reg = 0;
13438 int i;
13439
13440 dd_dev_info(dd, "Setting partition keys\n");
13441 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13442 reg |= (ppd->pkeys[i] &
13443 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13444 ((i % 4) *
13445 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13446 /* Each register holds 4 PKey values. */
13447 if ((i % 4) == 3) {
13448 write_csr(dd, RCV_PARTITION_KEY +
13449 ((i - 3) * 2), reg);
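 /*
 * Address math: each 64-bit RcvPartitionKey CSR holds four
 * 16-bit pkeys and consecutive CSRs are 8 bytes apart, so the
 * byte offset of the register holding pkey i (i % 4 == 3) is
 * ((i - 3) / 4) * 8, i.e. (i - 3) * 2.
 */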
13450 reg = 0;
13451 }
13452 }
13453
13454 /* Always enable HW pkeys check when pkeys table is set */
13455 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13456}
13457
13458/*
13459 * These CSRs and memories are uninitialized on reset and must be
13460 * written before reading to set the ECC/parity bits.
13461 *
13462 * NOTE: All user context CSRs that are not mmaped write-only
13463 * (e.g. the TID flows) must be initialized even if the driver never
13464 * reads them.
13465 */
13466static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13467{
13468 int i, j;
13469
13470 /* CceIntMap */
13471 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013472 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013473
13474 /* SendCtxtCreditReturnAddr */
13475 for (i = 0; i < dd->chip_send_contexts; i++)
13476 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13477
13478 /* PIO Send buffers */
13479 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080013480 /*
13481 * These are not normally read, and (presently) have no method
13482 * to be read, so are not pre-initialized
13483 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013484
13485 /* RcvHdrAddr */
13486 /* RcvHdrTailAddr */
13487 /* RcvTidFlowTable */
13488 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13489 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13490 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13491 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080013492 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013493 }
13494
13495 /* RcvArray */
13496 for (i = 0; i < dd->chip_rcv_array_count; i++)
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -070013497 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013498
13499 /* RcvQPMapTable */
13500 for (i = 0; i < 32; i++)
13501 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13502}
13503
13504/*
13505 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13506 */
13507static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13508 u64 ctrl_bits)
13509{
13510 unsigned long timeout;
13511 u64 reg;
13512
13513 /* is the condition present? */
13514 reg = read_csr(dd, CCE_STATUS);
13515 if ((reg & status_bits) == 0)
13516 return;
13517
13518 /* clear the condition */
13519 write_csr(dd, CCE_CTRL, ctrl_bits);
13520
13521 /* wait for the condition to clear */
13522 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13523 while (1) {
13524 reg = read_csr(dd, CCE_STATUS);
13525 if ((reg & status_bits) == 0)
13526 return;
13527 if (time_after(jiffies, timeout)) {
13528 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013529 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13530 status_bits, reg & status_bits);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013531 return;
13532 }
13533 udelay(1);
13534 }
13535}
13536
13537/* set CCE CSRs to chip reset defaults */
13538static void reset_cce_csrs(struct hfi1_devdata *dd)
13539{
13540 int i;
13541
13542 /* CCE_REVISION read-only */
13543 /* CCE_REVISION2 read-only */
13544 /* CCE_CTRL - bits clear automatically */
13545 /* CCE_STATUS read-only, use CceCtrl to clear */
13546 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13547 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13548 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13549 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13550 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13551 /* CCE_ERR_STATUS read-only */
13552 write_csr(dd, CCE_ERR_MASK, 0);
13553 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13554 /* CCE_ERR_FORCE leave alone */
13555 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13556 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13557 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13558 /* CCE_PCIE_CTRL leave alone */
13559 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13560 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13561 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013562 CCE_MSIX_TABLE_UPPER_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013563 }
13564 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13565 /* CCE_MSIX_PBA read-only */
13566 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13567 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13568 }
13569 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13570 write_csr(dd, CCE_INT_MAP, 0);
13571 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13572 /* CCE_INT_STATUS read-only */
13573 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13574 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13575 /* CCE_INT_FORCE leave alone */
13576 /* CCE_INT_BLOCKED read-only */
13577 }
13578 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13579 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13580}
13581
Mike Marciniszyn77241052015-07-30 15:17:43 -040013582/* set MISC CSRs to chip reset defaults */
13583static void reset_misc_csrs(struct hfi1_devdata *dd)
13584{
13585 int i;
13586
13587 for (i = 0; i < 32; i++) {
13588 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13589 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13590 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13591 }
Jubin John4d114fd2016-02-14 20:21:43 -080013592 /*
13593 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13594 * only be written in 128-byte chunks
13595 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013596 /* init RSA engine to clear lingering errors */
13597 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13598 write_csr(dd, MISC_CFG_RSA_MU, 0);
13599 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13600 /* MISC_STS_8051_DIGEST read-only */
13601 /* MISC_STS_SBM_DIGEST read-only */
13602 /* MISC_STS_PCIE_DIGEST read-only */
13603 /* MISC_STS_FAB_DIGEST read-only */
13604 /* MISC_ERR_STATUS read-only */
13605 write_csr(dd, MISC_ERR_MASK, 0);
13606 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13607 /* MISC_ERR_FORCE leave alone */
13608}
13609
13610/* set TXE CSRs to chip reset defaults */
13611static void reset_txe_csrs(struct hfi1_devdata *dd)
13612{
13613 int i;
13614
13615 /*
13616 * TXE Kernel CSRs
13617 */
13618 write_csr(dd, SEND_CTRL, 0);
13619 __cm_reset(dd, 0); /* reset CM internal state */
13620 /* SEND_CONTEXTS read-only */
13621 /* SEND_DMA_ENGINES read-only */
13622 /* SEND_PIO_MEM_SIZE read-only */
13623 /* SEND_DMA_MEM_SIZE read-only */
13624 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13625 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13626 /* SEND_PIO_ERR_STATUS read-only */
13627 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13628 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13629 /* SEND_PIO_ERR_FORCE leave alone */
13630 /* SEND_DMA_ERR_STATUS read-only */
13631 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13632 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13633 /* SEND_DMA_ERR_FORCE leave alone */
13634 /* SEND_EGRESS_ERR_STATUS read-only */
13635 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13636 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13637 /* SEND_EGRESS_ERR_FORCE leave alone */
13638 write_csr(dd, SEND_BTH_QP, 0);
13639 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13640 write_csr(dd, SEND_SC2VLT0, 0);
13641 write_csr(dd, SEND_SC2VLT1, 0);
13642 write_csr(dd, SEND_SC2VLT2, 0);
13643 write_csr(dd, SEND_SC2VLT3, 0);
13644 write_csr(dd, SEND_LEN_CHECK0, 0);
13645 write_csr(dd, SEND_LEN_CHECK1, 0);
13646 /* SEND_ERR_STATUS read-only */
13647 write_csr(dd, SEND_ERR_MASK, 0);
13648 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13649 /* SEND_ERR_FORCE read-only */
13650 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013651 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013652 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013653 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13654 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13655 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013656 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013657 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013658 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013659 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013660 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
Jubin John17fb4f22016-02-14 20:21:52 -080013661 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013662 /* SEND_CM_CREDIT_USED_STATUS read-only */
13663 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13664 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13665 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13666 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13667 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13668 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013669 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013670 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13671 /* SEND_CM_CREDIT_USED_VL read-only */
13672 /* SEND_CM_CREDIT_USED_VL15 read-only */
13673 /* SEND_EGRESS_CTXT_STATUS read-only */
13674 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13675 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13676 /* SEND_EGRESS_ERR_INFO read-only */
13677 /* SEND_EGRESS_ERR_SOURCE read-only */
13678
13679 /*
13680 * TXE Per-Context CSRs
13681 */
13682 for (i = 0; i < dd->chip_send_contexts; i++) {
13683 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13684 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13685 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13686 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13687 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13688 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13689 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13690 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13691 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13692 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13693 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13694 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13695 }
13696
13697 /*
13698 * TXE Per-SDMA CSRs
13699 */
13700 for (i = 0; i < dd->chip_sdma_engines; i++) {
13701 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13702 /* SEND_DMA_STATUS read-only */
13703 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13704 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13705 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13706 /* SEND_DMA_HEAD read-only */
13707 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13708 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13709 /* SEND_DMA_IDLE_CNT read-only */
13710 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13711 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13712 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13713 /* SEND_DMA_ENG_ERR_STATUS read-only */
13714 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13715 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13716 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13717 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13718 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13719 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13720 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13721 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13722 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13723 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13724 }
13725}
13726
13727/*
13728 * Expect on entry:
13729 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13730 */
13731static void init_rbufs(struct hfi1_devdata *dd)
13732{
13733 u64 reg;
13734 int count;
13735
13736 /*
13737 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13738 * clear.
13739 */
13740 count = 0;
13741 while (1) {
13742 reg = read_csr(dd, RCV_STATUS);
13743 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13744 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13745 break;
13746 /*
13747 * Give up after 1ms - maximum wait time.
13748 *
Harish Chegondie8a70af2016-09-25 07:42:01 -070013749 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
Mike Marciniszyn77241052015-07-30 15:17:43 -040013750 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
Harish Chegondie8a70af2016-09-25 07:42:01 -070013751 * 136 KB / (66% * 250MB/s) = 844us
Mike Marciniszyn77241052015-07-30 15:17:43 -040013752 */
13753 if (count++ > 500) {
13754 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013755 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13756 __func__, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013757 break;
13758 }
13759 udelay(2); /* do not busy-wait the CSR */
13760 }
13761
13762 /* start the init - expect RcvCtrl to be 0 */
13763 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13764
13765 /*
13766 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13767 * period after the write before RcvStatus.RxRbufInitDone is valid.
13768 * The delay in the first run through the loop below is sufficient and
13769 * required before the first read of RcvStatus.RxRbufInitDone.
13770 */
13771 read_csr(dd, RCV_CTRL);
13772
13773 /* wait for the init to finish */
13774 count = 0;
13775 while (1) {
13776 /* delay is required first time through - see above */
13777 udelay(2); /* do not busy-wait the CSR */
13778 reg = read_csr(dd, RCV_STATUS);
13779 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13780 break;
13781
13782 /* give up after 100us - slowest possible at 33MHz is 73us */
13783 if (count++ > 50) {
13784 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013785 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13786 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013787 break;
13788 }
13789 }
13790}
13791
13792/* set RXE CSRs to chip reset defaults */
13793static void reset_rxe_csrs(struct hfi1_devdata *dd)
13794{
13795 int i, j;
13796
13797 /*
13798 * RXE Kernel CSRs
13799 */
13800 write_csr(dd, RCV_CTRL, 0);
13801 init_rbufs(dd);
13802 /* RCV_STATUS read-only */
13803 /* RCV_CONTEXTS read-only */
13804 /* RCV_ARRAY_CNT read-only */
13805 /* RCV_BUF_SIZE read-only */
13806 write_csr(dd, RCV_BTH_QP, 0);
13807 write_csr(dd, RCV_MULTICAST, 0);
13808 write_csr(dd, RCV_BYPASS, 0);
13809 write_csr(dd, RCV_VL15, 0);
13810 /* this is a clear-down */
13811 write_csr(dd, RCV_ERR_INFO,
Jubin John17fb4f22016-02-14 20:21:52 -080013812 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013813 /* RCV_ERR_STATUS read-only */
13814 write_csr(dd, RCV_ERR_MASK, 0);
13815 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13816 /* RCV_ERR_FORCE leave alone */
13817 for (i = 0; i < 32; i++)
13818 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13819 for (i = 0; i < 4; i++)
13820 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13821 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13822 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13823 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13824 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013825 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13826 clear_rsm_rule(dd, i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013827 for (i = 0; i < 32; i++)
13828 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13829
13830 /*
13831 * RXE Kernel and User Per-Context CSRs
13832 */
13833 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13834 /* kernel */
13835 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13836 /* RCV_CTXT_STATUS read-only */
13837 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13838 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13839 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13840 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13841 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13842 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13843 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13844 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13845 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13846 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13847
13848 /* user */
13849 /* RCV_HDR_TAIL read-only */
13850 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13851 /* RCV_EGR_INDEX_TAIL read-only */
13852 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13853 /* RCV_EGR_OFFSET_TAIL read-only */
13854 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
Jubin John17fb4f22016-02-14 20:21:52 -080013855 write_uctxt_csr(dd, i,
13856 RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013857 }
13858 }
13859}
13860
13861/*
13862 * Set sc2vl tables.
13863 *
13864 * They power on to zeros, so to avoid send context errors
13865 * they need to be set:
13866 *
13867 * SC 0-7 -> VL 0-7 (respectively)
13868 * SC 15 -> VL 15
13869 * otherwise
13870 * -> VL 0
13871 */
13872static void init_sc2vl_tables(struct hfi1_devdata *dd)
13873{
13874 int i;
13875 /* init per architecture spec, constrained by hardware capability */
13876
13877 /* HFI maps sent packets */
13878 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13879 0,
13880 0, 0, 1, 1,
13881 2, 2, 3, 3,
13882 4, 4, 5, 5,
13883 6, 6, 7, 7));
13884 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13885 1,
13886 8, 0, 9, 0,
13887 10, 0, 11, 0,
13888 12, 0, 13, 0,
13889 14, 0, 15, 15));
13890 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13891 2,
13892 16, 0, 17, 0,
13893 18, 0, 19, 0,
13894 20, 0, 21, 0,
13895 22, 0, 23, 0));
13896 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13897 3,
13898 24, 0, 25, 0,
13899 26, 0, 27, 0,
13900 28, 0, 29, 0,
13901 30, 0, 31, 0));
13902
13903 /* DC maps received packets */
13904 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13905 15_0,
13906 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13907 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13908 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13909 31_16,
13910 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13911 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13912
13913 /* initialize the cached sc2vl values consistently with h/w */
13914 for (i = 0; i < 32; i++) {
13915 if (i < 8 || i == 15)
13916 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13917 else
13918 *((u8 *)(dd->sc2vl) + i) = 0;
13919 }
13920}
13921
13922/*
13923 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13924 * depend on the chip going through a power-on reset - a driver may be loaded
13925 * and unloaded many times.
13926 *
13927 * Do not write any CSR values to the chip in this routine - there may be
13928 * a reset following the (possible) FLR in this routine.
13929 *
13930 */
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070013931static int init_chip(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013932{
13933 int i;
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070013934 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013935
13936 /*
13937 * Put the HFI CSRs in a known state.
13938 * Combine this with a DC reset.
13939 *
13940 * Stop the device from doing anything while we do a
13941 * reset. We know there are no other active users of
13942 * the device since we are now in charge. Turn off
13943 * all outbound and inbound traffic and make sure
13944 * the device does not generate any interrupts.
13945 */
13946
13947 /* disable send contexts and SDMA engines */
13948 write_csr(dd, SEND_CTRL, 0);
13949 for (i = 0; i < dd->chip_send_contexts; i++)
13950 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13951 for (i = 0; i < dd->chip_sdma_engines; i++)
13952 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13953 /* disable port (turn off RXE inbound traffic) and contexts */
13954 write_csr(dd, RCV_CTRL, 0);
13955 for (i = 0; i < dd->chip_rcv_contexts; i++)
13956 write_csr(dd, RCV_CTXT_CTRL, 0);
13957 /* mask all interrupt sources */
13958 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013959 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013960
13961 /*
13962 * DC Reset: do a full DC reset before the register clear.
13963 * A recommended length of time to hold is one CSR read,
13964 * so reread the CceDcCtrl. Then, hold the DC in reset
13965 * across the clear.
13966 */
13967 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013968 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013969
13970 if (use_flr) {
13971 /*
13972 * A FLR will reset the SPC core and part of the PCIe.
13973 * The parts that need to be restored have already been
13974 * saved.
13975 */
13976 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13977
13978 /* do the FLR, the DC reset will remain */
Christoph Hellwig21c433a2017-04-25 14:36:19 -050013979 pcie_flr(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013980
13981 /* restore command and BARs */
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070013982 ret = restore_pci_variables(dd);
13983 if (ret) {
13984 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13985 __func__);
13986 return ret;
13987 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013988
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013989 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013990 dd_dev_info(dd, "Resetting CSRs with FLR\n");
Christoph Hellwig21c433a2017-04-25 14:36:19 -050013991 pcie_flr(dd->pcidev);
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070013992 ret = restore_pci_variables(dd);
13993 if (ret) {
13994 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13995 __func__);
13996 return ret;
13997 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013998 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013999 } else {
14000 dd_dev_info(dd, "Resetting CSRs with writes\n");
14001 reset_cce_csrs(dd);
14002 reset_txe_csrs(dd);
14003 reset_rxe_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014004 reset_misc_csrs(dd);
14005 }
14006 /* clear the DC reset */
14007 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014008
Mike Marciniszyn77241052015-07-30 15:17:43 -040014009 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080014010 setextled(dd, 0);
14011
Mike Marciniszyn77241052015-07-30 15:17:43 -040014012 /*
14013 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050014014 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040014015 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, holding
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050014016 * anything plugged in constantly in reset if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040014017 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050014018 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014019 * I2CCLK and I2CDAT will change per direction, and INT_N and
14020 * MODPRS_N are input only and their value is ignored.
14021 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050014022 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14023 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014024 init_chip_resources(dd);
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070014025 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014026}
14027
14028static void init_early_variables(struct hfi1_devdata *dd)
14029{
14030 int i;
14031
14032 /* assign link credit variables */
14033 dd->vau = CM_VAU;
14034 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014035 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040014036 dd->link_credits--;
14037 dd->vcu = cu_to_vcu(hfi1_cu);
14038 /* enough room for 8 MAD packets plus header - 17K */
14039 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14040 if (dd->vl15_init > dd->link_credits)
14041 dd->vl15_init = dd->link_credits;
14042
14043 write_uninitialized_csrs_and_memories(dd);
14044
14045 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14046 for (i = 0; i < dd->num_pports; i++) {
14047 struct hfi1_pportdata *ppd = &dd->pport[i];
14048
14049 set_partition_keys(ppd);
14050 }
14051 init_sc2vl_tables(dd);
14052}
14053
14054static void init_kdeth_qp(struct hfi1_devdata *dd)
14055{
14056 /* user changed the KDETH_QP */
14057 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14058 /* out of range or illegal value */
14059 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14060 kdeth_qp = 0;
14061 }
14062 if (kdeth_qp == 0) /* not set, or failed range check */
14063 kdeth_qp = DEFAULT_KDETH_QP;
14064
14065 write_csr(dd, SEND_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080014066 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14067 SEND_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014068
14069 write_csr(dd, RCV_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080014070 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14071 RCV_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014072}
14073
14074/**
14075 * init_qpmap_table
14076 * @dd - device data
14077 * @first_ctxt - first context
14078 * @last_ctxt - last context
14079 *
14080 * This routine sets the qpn mapping table that
14081 * is indexed by qpn[8:1].
14082 *
14083 * The routine will round robin the 256 settings
14084 * from first_ctxt to last_ctxt.
14085 *
14086 * The first/last looks ahead to having specialized
14087 * receive contexts for mgmt and bypass. Normal
14088 * verbs traffic is assumed to be on a range
14089 * of receive contexts.
14090 */
14091static void init_qpmap_table(struct hfi1_devdata *dd,
14092 u32 first_ctxt,
14093 u32 last_ctxt)
14094{
14095 u64 reg = 0;
14096 u64 regno = RCV_QP_MAP_TABLE;
14097 int i;
14098 u64 ctxt = first_ctxt;
14099
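 /*
 * The 256 one-byte map entries are packed 8 per 64-bit
 * RcvQpMapTable register, so 32 registers are written in total,
 * one each time i % 8 == 7 below.
 */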
Dean Luick60d585ad2016-04-12 10:50:35 -070014100 for (i = 0; i < 256; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014101 reg |= ctxt << (8 * (i % 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014102 ctxt++;
14103 if (ctxt > last_ctxt)
14104 ctxt = first_ctxt;
Dean Luick60d585ad2016-04-12 10:50:35 -070014105 if (i % 8 == 7) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014106 write_csr(dd, regno, reg);
14107 reg = 0;
14108 regno += 8;
14109 }
14110 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040014111
14112 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14113 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14114}
14115
Dean Luick372cc85a2016-04-12 11:30:51 -070014116struct rsm_map_table {
14117 u64 map[NUM_MAP_REGS];
14118 unsigned int used;
14119};
14120
Dean Luickb12349a2016-04-12 11:31:33 -070014121struct rsm_rule_data {
14122 u8 offset;
14123 u8 pkt_type;
14124 u32 field1_off;
14125 u32 field2_off;
14126 u32 index1_off;
14127 u32 index1_width;
14128 u32 index2_off;
14129 u32 index2_width;
14130 u32 mask1;
14131 u32 value1;
14132 u32 mask2;
14133 u32 value2;
14134};
14135
Dean Luick372cc85a2016-04-12 11:30:51 -070014136/*
14137 * Return an initialized RMT map table for users to fill in. OK if it
14138 * returns NULL, indicating no table.
14139 */
14140static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14141{
14142 struct rsm_map_table *rmt;
14143 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
14144
14145 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14146 if (rmt) {
14147 memset(rmt->map, rxcontext, sizeof(rmt->map));
14148 rmt->used = 0;
14149 }
14150
14151 return rmt;
14152}
14153
14154/*
14155 * Write the final RMT map table to the chip and free the table. OK if
14156 * table is NULL.
14157 */
14158static void complete_rsm_map_table(struct hfi1_devdata *dd,
14159 struct rsm_map_table *rmt)
14160{
14161 int i;
14162
14163 if (rmt) {
14164 /* write table to chip */
14165 for (i = 0; i < NUM_MAP_REGS; i++)
14166 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14167
14168 /* enable RSM */
14169 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14170 }
14171}
14172
Dean Luickb12349a2016-04-12 11:31:33 -070014173/*
14174 * Add a receive side mapping rule.
14175 */
14176static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14177 struct rsm_rule_data *rrd)
14178{
14179 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14180 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14181 1ull << rule_index | /* enable bit */
14182 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14183 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14184 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14185 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14186 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14187 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14188 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14189 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14190 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14191 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14192 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14193 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14194 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14195}
14196
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014197/*
14198 * Clear a receive side mapping rule.
14199 */
14200static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14201{
14202 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14203 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14204 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14205}
14206
Dean Luick4a818be2016-04-12 11:31:11 -070014207/* return the number of RSM map table entries that will be used for QOS */
14208static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14209 unsigned int *np)
14210{
14211 int i;
14212 unsigned int m, n;
14213 u8 max_by_vl = 0;
14214
14215 /* is QOS active at all? */
14216 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14217 num_vls == 1 ||
14218 krcvqsset <= 1)
14219 goto no_qos;
14220
14221 /* determine bits for qpn */
14222 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14223 if (krcvqs[i] > max_by_vl)
14224 max_by_vl = krcvqs[i];
14225 if (max_by_vl > 32)
14226 goto no_qos;
14227 m = ilog2(__roundup_pow_of_two(max_by_vl));
14228
14229 /* determine bits for vl */
14230 n = ilog2(__roundup_pow_of_two(num_vls));
14231
14232 /* reject if too much is used */
14233 if ((m + n) > 7)
14234 goto no_qos;
14235
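 /*
 * Worked example (purely illustrative): with num_vls = 4 and
 * krcvqs = {3, 2, 2, 2}, max_by_vl = 3 so m = 2, n = 2, and the
 * QOS rule consumes 1 << (2 + 2) = 16 RMT entries.
 */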
14236 if (mp)
14237 *mp = m;
14238 if (np)
14239 *np = n;
14240
14241 return 1 << (m + n);
14242
14243no_qos:
14244 if (mp)
14245 *mp = 0;
14246 if (np)
14247 *np = 0;
14248 return 0;
14249}
14250
Mike Marciniszyn77241052015-07-30 15:17:43 -040014251/**
14252 * init_qos - init RX qos
14253 * @dd - device data
Dean Luick372cc85a2016-04-12 11:30:51 -070014254 * @rmt - RSM map table
Mike Marciniszyn77241052015-07-30 15:17:43 -040014255 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014256 * This routine initializes Rule 0 and the RSM map table to implement
14257 * quality of service (qos).
Mike Marciniszyn77241052015-07-30 15:17:43 -040014258 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014259 * If all of the limit tests succeed, qos is applied based on the array
14260 * interpretation of krcvqs where entry 0 is VL0.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014261 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014262 * The number of vl bits (n) and the number of qpn bits (m) are computed to
14263 * feed both the RSM map table and the single rule.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014264 */
Dean Luick372cc85a2016-04-12 11:30:51 -070014265static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014266{
Dean Luickb12349a2016-04-12 11:31:33 -070014267 struct rsm_rule_data rrd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014268 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
Dean Luick372cc85a2016-04-12 11:30:51 -070014269 unsigned int rmt_entries;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014270 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014271
Dean Luick4a818be2016-04-12 11:31:11 -070014272 if (!rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014273 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014274 rmt_entries = qos_rmt_entries(dd, &m, &n);
14275 if (rmt_entries == 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014276 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014277 qpns_per_vl = 1 << m;
14278
Dean Luick372cc85a2016-04-12 11:30:51 -070014279 /* enough room in the map table? */
14280 rmt_entries = 1 << (m + n);
14281 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
Easwar Hariharan859bcad2015-12-10 11:13:38 -050014282 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014283
Dean Luick372cc85a2016-04-12 11:30:51 -070014284 /* add qos entries to the RSM map table */
Dean Luick33a9eb52016-04-12 10:50:22 -070014285 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014286 unsigned tctxt;
14287
14288 for (qpn = 0, tctxt = ctxt;
14289 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14290 unsigned idx, regoff, regidx;
14291
Dean Luick372cc85a2016-04-12 11:30:51 -070014292 /* generate the index the hardware will produce */
14293 idx = rmt->used + ((qpn << n) ^ i);
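 /*
 * For example (illustrative only): with n = 1, VL i = 1 and
 * qpn = 2, idx = rmt->used + ((2 << 1) ^ 1) = rmt->used + 5.
 */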
Mike Marciniszyn77241052015-07-30 15:17:43 -040014294 regoff = (idx % 8) * 8;
14295 regidx = idx / 8;
Dean Luick372cc85a2016-04-12 11:30:51 -070014296 /* replace default with context number */
14297 reg = rmt->map[regidx];
Mike Marciniszyn77241052015-07-30 15:17:43 -040014298 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14299 << regoff);
14300 reg |= (u64)(tctxt++) << regoff;
Dean Luick372cc85a2016-04-12 11:30:51 -070014301 rmt->map[regidx] = reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014302 if (tctxt == ctxt + krcvqs[i])
14303 tctxt = ctxt;
14304 }
14305 ctxt += krcvqs[i];
14306 }
Dean Luickb12349a2016-04-12 11:31:33 -070014307
14308 rrd.offset = rmt->used;
14309 rrd.pkt_type = 2;
14310 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14311 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14312 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14313 rrd.index1_width = n;
14314 rrd.index2_off = QPN_SELECT_OFFSET;
14315 rrd.index2_width = m + n;
14316 rrd.mask1 = LRH_BTH_MASK;
14317 rrd.value1 = LRH_BTH_VALUE;
14318 rrd.mask2 = LRH_SC_MASK;
14319 rrd.value2 = LRH_SC_VALUE;
14320
14321 /* add rule 0 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014322 add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
Dean Luickb12349a2016-04-12 11:31:33 -070014323
Dean Luick372cc85a2016-04-12 11:30:51 -070014324 /* mark RSM map entries as used */
14325 rmt->used += rmt_entries;
Dean Luick33a9eb52016-04-12 10:50:22 -070014326 /* map everything else to the mcast/err/vl15 context */
14327 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014328 dd->qos_shift = n + 1;
14329 return;
14330bail:
14331 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050014332 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014333}
14334
Dean Luick8f000f72016-04-12 11:32:06 -070014335static void init_user_fecn_handling(struct hfi1_devdata *dd,
14336 struct rsm_map_table *rmt)
14337{
14338 struct rsm_rule_data rrd;
14339 u64 reg;
14340 int i, idx, regoff, regidx;
14341 u8 offset;
14342
14343 /* there needs to be enough room in the map table */
14344 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14345 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14346 return;
14347 }
14348
14349 /*
14350 * RSM will extract the destination context as an index into the
14351 * map table. The destination contexts are a sequential block
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014352 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
Dean Luick8f000f72016-04-12 11:32:06 -070014353 * Map entries are accessed as offset + extracted value. Adjust
14354 * the added offset so this sequence can be placed anywhere in
14355 * the table - as long as the entries themselves do not wrap.
14356 * There are only enough bits in offset for the table size, so
14357 * start with that to allow for a "negative" offset.
14358 */
14359 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014360 (int)dd->first_dyn_alloc_ctxt);
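 /*
 * Worked example (illustrative values; the map has 32 x 8 = 256
 * entries): with rmt->used = 20 and first_dyn_alloc_ctxt = 8, the
 * u8 cast wraps 256 + 20 - 8 = 268 to offset = 12, so context 8
 * lands on entry 12 + 8 = 20 = rmt->used, the first of the
 * identity-mapped entries written below.
 */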
Dean Luick8f000f72016-04-12 11:32:06 -070014361
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014362 for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
Dean Luick8f000f72016-04-12 11:32:06 -070014363 i < dd->num_rcv_contexts; i++, idx++) {
14364 /* replace with identity mapping */
14365 regoff = (idx % 8) * 8;
14366 regidx = idx / 8;
14367 reg = rmt->map[regidx];
14368 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14369 reg |= (u64)i << regoff;
14370 rmt->map[regidx] = reg;
14371 }
14372
14373 /*
14374 * For RSM intercept of Expected FECN packets:
14375 * o packet type 0 - expected
14376 * o match on F (bit 95), using select/match 1, and
14377 * o match on SH (bit 133), using select/match 2.
14378 *
14379 * Use index 1 to extract the 8-bit receive context from DestQP
14380 * (start at bit 64). Use that as the RSM map table index.
14381 */
14382 rrd.offset = offset;
14383 rrd.pkt_type = 0;
14384 rrd.field1_off = 95;
14385 rrd.field2_off = 133;
14386 rrd.index1_off = 64;
14387 rrd.index1_width = 8;
14388 rrd.index2_off = 0;
14389 rrd.index2_width = 0;
14390 rrd.mask1 = 1;
14391 rrd.value1 = 1;
14392 rrd.mask2 = 1;
14393 rrd.value2 = 1;
14394
14395 /* add rule 1 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014396 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
Dean Luick8f000f72016-04-12 11:32:06 -070014397
14398 rmt->used += dd->num_user_contexts;
14399}
14400
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014401/* Initialize RSM for VNIC */
14402void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14403{
14404 u8 i, j;
14405 u8 ctx_id = 0;
14406 u64 reg;
14407 u32 regoff;
14408 struct rsm_rule_data rrd;
14409
14410 if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14411 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14412 dd->vnic.rmt_start);
14413 return;
14414 }
14415
14416 dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14417 dd->vnic.rmt_start,
14418 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14419
14420 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14421 regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14422 reg = read_csr(dd, regoff);
14423 for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14424 /* Update map register with vnic context */
14425 j = (dd->vnic.rmt_start + i) % 8;
14426 reg &= ~(0xffllu << (j * 8));
14427 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14428 /* Wrap up vnic ctx index */
14429 ctx_id %= dd->vnic.num_ctxt;
14430 /* Write back map register */
14431 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14432 dev_dbg(&(dd)->pcidev->dev,
14433 "Vnic rsm map reg[%d] =0x%llx\n",
14434 regoff - RCV_RSM_MAP_TABLE, reg);
14435
14436 write_csr(dd, regoff, reg);
14437 regoff += 8;
14438 if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14439 reg = read_csr(dd, regoff);
14440 }
14441 }
14442
14443 /* Add rule for vnic */
14444 rrd.offset = dd->vnic.rmt_start;
14445 rrd.pkt_type = 4;
14446 /* Match 16B packets */
14447 rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14448 rrd.mask1 = L2_TYPE_MASK;
14449 rrd.value1 = L2_16B_VALUE;
14450 /* Match ETH L4 packets */
14451 rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14452 rrd.mask2 = L4_16B_TYPE_MASK;
14453 rrd.value2 = L4_16B_ETH_VALUE;
14454 /* Calc context from veswid and entropy */
14455 rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14456 rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14457 rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14458 rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14459 add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14460
14461 /* Enable RSM if not already enabled */
14462 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14463}
14464
14465void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14466{
14467 clear_rsm_rule(dd, RSM_INS_VNIC);
14468
14469 /* Disable RSM if used only by vnic */
14470 if (dd->vnic.rmt_start == 0)
14471 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14472}
14473
Mike Marciniszyn77241052015-07-30 15:17:43 -040014474static void init_rxe(struct hfi1_devdata *dd)
14475{
Dean Luick372cc85a2016-04-12 11:30:51 -070014476 struct rsm_map_table *rmt;
Don Hiatt72c07e22017-08-04 13:53:58 -070014477 u64 val;
Dean Luick372cc85a2016-04-12 11:30:51 -070014478
Mike Marciniszyn77241052015-07-30 15:17:43 -040014479 /* enable all receive errors */
14480 write_csr(dd, RCV_ERR_MASK, ~0ull);
Dean Luick372cc85a2016-04-12 11:30:51 -070014481
14482 rmt = alloc_rsm_map_table(dd);
14483 /* set up QOS, including the QPN map table */
14484 init_qos(dd, rmt);
Dean Luick8f000f72016-04-12 11:32:06 -070014485 init_user_fecn_handling(dd, rmt);
Dean Luick372cc85a2016-04-12 11:30:51 -070014486 complete_rsm_map_table(dd, rmt);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014487 /* record number of used rsm map entries for vnic */
14488 dd->vnic.rmt_start = rmt->used;
Dean Luick372cc85a2016-04-12 11:30:51 -070014489 kfree(rmt);
14490
Mike Marciniszyn77241052015-07-30 15:17:43 -040014491 /*
14492 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14493 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14494 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14495 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14496 * Max_PayLoad_Size set to its minimum of 128.
14497 *
14498 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14499 * (64 bytes). Max_Payload_Size is possibly modified upward in
14500 * tune_pcie_caps() which is called after this routine.
14501 */
Don Hiatt72c07e22017-08-04 13:53:58 -070014502
14503 /* Have 16 bytes (4DW) of bypass header available in header queue */
14504 val = read_csr(dd, RCV_BYPASS);
14505 val |= (4ull << 16);
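 /*
 * The 4 written at bit 16 is assumed to be the bypass header size
 * in DWs, matching the 16-byte (4 DW) note above.
 */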
14506 write_csr(dd, RCV_BYPASS, val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014507}
14508
14509static void init_other(struct hfi1_devdata *dd)
14510{
14511 /* enable all CCE errors */
14512 write_csr(dd, CCE_ERR_MASK, ~0ull);
14513 /* enable *some* Misc errors */
14514 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14515 /* enable all DC errors, except LCB */
14516 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14517 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14518}
14519
14520/*
14521 * Fill out the given AU table using the given CU. A CU is defined in terms
14522 * AUs. The table is a an encoding: given the index, how many AUs does that
14523 * represent?
14524 *
14525 * NOTE: Assumes that the register layout is the same for the
14526 * local and remote tables.
14527 */
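/*
 * For example (illustrative only): with cu = 2 the table encodes
 * {0, 1, 4, 8, 16, 32, 64, 128} AUs for indices 0-7.
 */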
14528static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14529 u32 csr0to3, u32 csr4to7)
14530{
14531 write_csr(dd, csr0to3,
Jubin John17fb4f22016-02-14 20:21:52 -080014532 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14533 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14534 2ull * cu <<
14535 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14536 4ull * cu <<
14537 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014538 write_csr(dd, csr4to7,
Jubin John17fb4f22016-02-14 20:21:52 -080014539 8ull * cu <<
14540 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14541 16ull * cu <<
14542 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14543 32ull * cu <<
14544 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14545 64ull * cu <<
14546 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014547}
14548
14549static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14550{
14551 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014552 SEND_CM_LOCAL_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014553}
14554
14555void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14556{
14557 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014558 SEND_CM_REMOTE_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014559}
14560
14561static void init_txe(struct hfi1_devdata *dd)
14562{
14563 int i;
14564
14565 /* enable all PIO, SDMA, general, and Egress errors */
14566 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14567 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14568 write_csr(dd, SEND_ERR_MASK, ~0ull);
14569 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14570
14571 /* enable all per-context and per-SDMA engine errors */
14572 for (i = 0; i < dd->chip_send_contexts; i++)
14573 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14574 for (i = 0; i < dd->chip_sdma_engines; i++)
14575 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14576
14577 /* set the local CU to AU mapping */
14578 assign_local_cm_au_table(dd, dd->vcu);
14579
14580 /*
14581 * Set reasonable default for Credit Return Timer
14582 * Don't set on Simulator - causes it to choke.
14583 */
14584 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14585 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14586}
14587
Michael J. Ruhl17573972017-07-24 07:46:01 -070014588int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14589 u16 jkey)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014590{
Michael J. Ruhl17573972017-07-24 07:46:01 -070014591 u8 hw_ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014592 u64 reg;
14593
Michael J. Ruhl17573972017-07-24 07:46:01 -070014594 if (!rcd || !rcd->sc)
14595 return -EINVAL;
14596
14597 hw_ctxt = rcd->sc->hw_context;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014598 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14599 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14600 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14601 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14602 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14603 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
Michael J. Ruhl17573972017-07-24 07:46:01 -070014604 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014605 /*
14606 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040014607 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014608 if (!is_ax(dd)) {
Michael J. Ruhl17573972017-07-24 07:46:01 -070014609 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014610 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
Michael J. Ruhl17573972017-07-24 07:46:01 -070014611 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014612 }
14613
14614 /* Enable J_KEY check on receive context. */
14615 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14616 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14617 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
Michael J. Ruhl17573972017-07-24 07:46:01 -070014618 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14619
14620 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014621}
14622
Michael J. Ruhl17573972017-07-24 07:46:01 -070014623int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014624{
Michael J. Ruhl17573972017-07-24 07:46:01 -070014625 u8 hw_ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014626 u64 reg;
14627
Michael J. Ruhl17573972017-07-24 07:46:01 -070014628 if (!rcd || !rcd->sc)
14629 return -EINVAL;
14630
14631 hw_ctxt = rcd->sc->hw_context;
14632 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014633 /*
14634 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14635 * This check would not have been enabled for A0 h/w, see
14636 * set_ctxt_jkey().
14637 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014638 if (!is_ax(dd)) {
Michael J. Ruhl17573972017-07-24 07:46:01 -070014639 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014640 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
Michael J. Ruhl17573972017-07-24 07:46:01 -070014641 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014642 }
14643 /* Turn off the J_KEY on the receive side */
Michael J. Ruhl17573972017-07-24 07:46:01 -070014644 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14645
14646 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014647}
14648
Michael J. Ruhl17573972017-07-24 07:46:01 -070014649int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14650 u16 pkey)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014651{
Michael J. Ruhl17573972017-07-24 07:46:01 -070014652 u8 hw_ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014653 u64 reg;
14654
Michael J. Ruhl17573972017-07-24 07:46:01 -070014655 if (!rcd || !rcd->sc)
14656 return -EINVAL;
14657
14658 hw_ctxt = rcd->sc->hw_context;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014659 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14660 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
Michael J. Ruhl17573972017-07-24 07:46:01 -070014661 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14662 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014663 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Sebastian Sancheze38d1e42016-04-12 11:22:21 -070014664 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
Michael J. Ruhl17573972017-07-24 07:46:01 -070014665 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14666
14667 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014668}
14669
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014670int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014671{
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014672 u8 hw_ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014673 u64 reg;
14674
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014675 if (!ctxt || !ctxt->sc)
14676 return -EINVAL;
14677
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014678 hw_ctxt = ctxt->sc->hw_context;
14679 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014680 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014681 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14682 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14683
14684 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014685}
14686
14687/*
14688 * Start doing the clean up of the chip. Our clean up happens in multiple
14689 * stages and this is just the first.
14690 */
14691void hfi1_start_cleanup(struct hfi1_devdata *dd)
14692{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080014693 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014694 free_cntrs(dd);
14695 free_rcverr(dd);
14696 clean_up_interrupts(dd);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014697 finish_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014698}
14699
14700#define HFI_BASE_GUID(dev) \
14701 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14702
14703/*
Dean Luick78eb1292016-03-05 08:49:45 -080014704 * Information can be shared between the two HFIs on the same ASIC
14705 * in the same OS. This function finds the peer device and sets
14706 * up a shared structure.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014707 */
Dean Luick78eb1292016-03-05 08:49:45 -080014708static int init_asic_data(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014709{
14710 unsigned long flags;
14711 struct hfi1_devdata *tmp, *peer = NULL;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014712 struct hfi1_asic_data *asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014713 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014714
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014715 /* pre-allocate the asic structure in case we are the first device */
14716 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14717 if (!asic_data)
14718 return -ENOMEM;
14719
Mike Marciniszyn77241052015-07-30 15:17:43 -040014720 spin_lock_irqsave(&hfi1_devs_lock, flags);
14721 /* Find our peer device */
14722 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14723 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14724 dd->unit != tmp->unit) {
14725 peer = tmp;
14726 break;
14727 }
14728 }
14729
Dean Luick78eb1292016-03-05 08:49:45 -080014730 if (peer) {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014731 /* use already allocated structure */
Dean Luick78eb1292016-03-05 08:49:45 -080014732 dd->asic_data = peer->asic_data;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014733 kfree(asic_data);
Dean Luick78eb1292016-03-05 08:49:45 -080014734 } else {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014735 dd->asic_data = asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014736 mutex_init(&dd->asic_data->asic_resource_mutex);
14737 }
14738 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014739 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
Dean Luickdba715f2016-07-06 17:28:52 -040014740
14741 /* first one through - set up i2c devices */
14742 if (!peer)
14743 ret = set_up_i2c(dd, dd->asic_data);
14744
Dean Luick78eb1292016-03-05 08:49:45 -080014745 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014746}
14747
Dean Luick5d9157a2015-11-16 21:59:34 -050014748/*
14749 * Set dd->boardname. Use a generic name if a name is not returned from
14750 * EFI variable space.
14751 *
14752 * Return 0 on success, -ENOMEM if space could not be allocated.
14753 */
14754static int obtain_boardname(struct hfi1_devdata *dd)
14755{
14756 /* generic board description */
14757 const char generic[] =
14758 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14759 unsigned long size;
14760 int ret;
14761
14762 ret = read_hfi1_efi_var(dd, "description", &size,
14763 (void **)&dd->boardname);
14764 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014765 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014766 /* use generic description */
14767 dd->boardname = kstrdup(generic, GFP_KERNEL);
14768 if (!dd->boardname)
14769 return -ENOMEM;
14770 }
14771 return 0;
14772}
14773
Kaike Wan24487dd2016-02-26 13:33:23 -080014774/*
14775 * Check the interrupt registers to make sure that they are mapped correctly.
14776 * It is intended to help the user identify any mismapping by the VMM when
14777 * the driver is running in a VM. This function should only be called before
14778 * interrupts are set up properly.
14779 *
14780 * Return 0 on success, -EINVAL on failure.
14781 */
14782static int check_int_registers(struct hfi1_devdata *dd)
14783{
14784 u64 reg;
14785 u64 all_bits = ~(u64)0;
14786 u64 mask;
14787
14788 /* Clear CceIntMask[0] to avoid raising any interrupts */
14789 mask = read_csr(dd, CCE_INT_MASK);
14790 write_csr(dd, CCE_INT_MASK, 0ull);
14791 reg = read_csr(dd, CCE_INT_MASK);
14792 if (reg)
14793 goto err_exit;
14794
14795 /* Clear all interrupt status bits */
14796 write_csr(dd, CCE_INT_CLEAR, all_bits);
14797 reg = read_csr(dd, CCE_INT_STATUS);
14798 if (reg)
14799 goto err_exit;
14800
14801 /* Set all interrupt status bits */
14802 write_csr(dd, CCE_INT_FORCE, all_bits);
14803 reg = read_csr(dd, CCE_INT_STATUS);
14804 if (reg != all_bits)
14805 goto err_exit;
14806
14807 /* Restore the interrupt mask */
14808 write_csr(dd, CCE_INT_CLEAR, all_bits);
14809 write_csr(dd, CCE_INT_MASK, mask);
14810
14811 return 0;
14812err_exit:
14813 write_csr(dd, CCE_INT_MASK, mask);
14814 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14815 return -EINVAL;
14816}
14817
Mike Marciniszyn77241052015-07-30 15:17:43 -040014818/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014819 * hfi1_init_dd() - Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014820 * @pdev: the pci_dev for hfi1_ib device
14821 * @ent: pci_device_id struct for this dev
14822 *
14823 * Also allocates, initializes, and returns the devdata struct for this
14824 * device instance
14825 *
14826 * This is global, and is called directly at init to set up the
14827 * chip-specific function pointers for later use.
14828 */
14829struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14830 const struct pci_device_id *ent)
14831{
14832 struct hfi1_devdata *dd;
14833 struct hfi1_pportdata *ppd;
14834 u64 reg;
14835 int i, ret;
14836 static const char * const inames[] = { /* implementation names */
14837 "RTL silicon",
14838 "RTL VCS simulation",
14839 "RTL FPGA emulation",
14840 "Functional simulator"
14841 };
Kaike Wan24487dd2016-02-26 13:33:23 -080014842 struct pci_dev *parent = pdev->bus->self;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014843
Jubin John17fb4f22016-02-14 20:21:52 -080014844 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14845 sizeof(struct hfi1_pportdata));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014846 if (IS_ERR(dd))
14847 goto bail;
14848 ppd = dd->pport;
14849 for (i = 0; i < dd->num_pports; i++, ppd++) {
14850 int vl;
14851 /* init common fields */
14852 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14853 /* DC supports 4 link widths */
14854 ppd->link_width_supported =
14855 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14856 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14857 ppd->link_width_downgrade_supported =
14858 ppd->link_width_supported;
14859 /* start out enabling only 4X */
14860 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14861 ppd->link_width_downgrade_enabled =
14862 ppd->link_width_downgrade_supported;
14863 /* link width active is 0 when link is down */
14864 /* link width downgrade active is 0 when link is down */
14865
Jubin Johnd0d236e2016-02-14 20:20:15 -080014866 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14867 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014868 hfi1_early_err(&pdev->dev,
14869 "Invalid num_vls %u, using %u VLs\n",
14870 num_vls, HFI1_MAX_VLS_SUPPORTED);
14871 num_vls = HFI1_MAX_VLS_SUPPORTED;
14872 }
14873 ppd->vls_supported = num_vls;
14874 ppd->vls_operational = ppd->vls_supported;
14875 /* Set the default MTU. */
14876 for (vl = 0; vl < num_vls; vl++)
14877 dd->vld[vl].mtu = hfi1_max_mtu;
14878 dd->vld[15].mtu = MAX_MAD_PACKET;
14879 /*
14880 * Set the initial values to reasonable defaults; they will be set
14881 * for real when the link is up.
14882 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014883 ppd->overrun_threshold = 0x4;
14884 ppd->phy_error_threshold = 0xf;
14885 ppd->port_crc_mode_enabled = link_crc_mask;
14886 /* initialize supported LTP CRC mode */
14887 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14888 /* initialize enabled LTP CRC mode */
14889 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
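		/*
		 * Illustrative note (assumption, not from the original
		 * comments): the two shifts above appear to pack the LTP CRC
		 * information into nibbles of port_ltp_crc_mode - supported
		 * modes in bits [11:8] and enabled modes in bits [7:4] -
		 * leaving the low nibble for the active mode once the link
		 * negotiates it.
		 */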
14890 /* start in offline */
14891 ppd->host_link_state = HLS_DN_OFFLINE;
14892 init_vl_arb_caches(ppd);
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070014893 ppd->pstate = PLS_OFFLINE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014894 }
14895
14896 dd->link_default = HLS_DN_POLL;
14897
14898 /*
14899 * Do remaining PCIe setup and save PCIe values in dd.
14900 * Any error printing is already done by the init code.
14901 * On return, we have the chip mapped.
14902 */
Easwar Hariharan26ea2542016-10-17 04:19:58 -070014903 ret = hfi1_pcie_ddinit(dd, pdev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014904 if (ret < 0)
14905 goto bail_free;
14906
Bartlomiej Dudeka618b7e2017-07-24 07:46:30 -070014907 /* Save PCI space registers to rewrite after device reset */
14908 ret = save_pci_variables(dd);
14909 if (ret < 0)
14910 goto bail_cleanup;
14911
Mike Marciniszyn77241052015-07-30 15:17:43 -040014912 /* verify that reads actually work, save revision for reset check */
14913 dd->revision = read_csr(dd, CCE_REVISION);
14914 if (dd->revision == ~(u64)0) {
14915 dd_dev_err(dd, "cannot read chip CSRs\n");
14916 ret = -EINVAL;
14917 goto bail_cleanup;
14918 }
14919 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14920 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14921 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14922 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14923
Jubin John4d114fd2016-02-14 20:21:43 -080014924 /*
Kaike Wan24487dd2016-02-26 13:33:23 -080014925 * Check the interrupt register mapping if the driver has no access to
14926 * the upstream component. In this case, it is likely that the driver
14927 * is running in a VM.
14928 */
14929 if (!parent) {
14930 ret = check_int_registers(dd);
14931 if (ret)
14932 goto bail_cleanup;
14933 }
14934
14935 /*
Jubin John4d114fd2016-02-14 20:21:43 -080014936 * obtain the hardware ID - NOT related to unit, which is a
14937 * software enumeration
14938 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014939 reg = read_csr(dd, CCE_REVISION2);
14940 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14941 & CCE_REVISION2_HFI_ID_MASK;
14942 /* the variable size will remove unwanted bits */
14943 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14944 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14945 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080014946 dd->icode < ARRAY_SIZE(inames) ?
14947 inames[dd->icode] : "unknown", (int)dd->irev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014948
14949 /* speeds the hardware can support */
14950 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14951 /* speeds allowed to run at */
14952 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14953 /* give a reasonable active value, will be set on link up */
14954 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14955
14956 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14957 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14958 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14959 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14960 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14961 /* fix up link widths for emulation _p */
14962 ppd = dd->pport;
14963 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14964 ppd->link_width_supported =
14965 ppd->link_width_enabled =
14966 ppd->link_width_downgrade_supported =
14967 ppd->link_width_downgrade_enabled =
14968 OPA_LINK_WIDTH_1X;
14969 }
14970 /* ensure num_vls isn't larger than the number of sdma engines */
14971 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14972 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014973 num_vls, dd->chip_sdma_engines);
14974 num_vls = dd->chip_sdma_engines;
14975 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014976 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014977 }
14978
14979 /*
14980 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14981 * Limit the max if larger than the field holds. If timeout is
14982 * non-zero, then the calculated field will be at least 1.
14983 *
14984 * Must be after icode is set up - the cclock rate depends
14985 * on knowing the hardware being used.
14986 */
14987 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14988 if (dd->rcv_intr_timeout_csr >
14989 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14990 dd->rcv_intr_timeout_csr =
14991 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14992 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14993 dd->rcv_intr_timeout_csr = 1;
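	/*
	 * Worked example (illustrative only, values assumed): if the
	 * rcv_intr_timeout module parameter converts to 6400 cclocks via
	 * ns_to_cclock(), the CSR field is programmed with 6400 / 64 = 100
	 * reload units.  A non-zero timeout that converts to fewer than 64
	 * cclocks still yields a field value of 1, and anything beyond
	 * RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK is clamped to the mask.
	 */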
14994
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014995 /* needs to be done before we look for the peer device */
14996 read_guid(dd);
14997
Dean Luick78eb1292016-03-05 08:49:45 -080014998 /* set up shared ASIC data with peer device */
14999 ret = init_asic_data(dd);
15000 if (ret)
15001 goto bail_cleanup;
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040015002
Mike Marciniszyn77241052015-07-30 15:17:43 -040015003 /* obtain chip sizes, reset chip CSRs */
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070015004 ret = init_chip(dd);
15005 if (ret)
15006 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040015007
15008 /* read in the PCIe link speed information */
15009 ret = pcie_speeds(dd);
15010 if (ret)
15011 goto bail_cleanup;
15012
Dean Luicke83eba22016-09-30 04:41:45 -070015013 /* call before get_platform_config(), after init_chip_resources() */
15014 ret = eprom_init(dd);
15015 if (ret)
15016 goto bail_free_rcverr;
15017
Easwar Hariharanc3838b32016-02-09 14:29:13 -080015018 /* Needs to be called before hfi1_firmware_init */
15019 get_platform_config(dd);
15020
Mike Marciniszyn77241052015-07-30 15:17:43 -040015021 /* read in firmware */
15022 ret = hfi1_firmware_init(dd);
15023 if (ret)
15024 goto bail_cleanup;
15025
15026 /*
15027 * In general, the PCIe Gen3 transition must occur after the
15028 * chip has been idled (so it won't initiate any PCIe transactions
15029 * e.g. an interrupt) and before the driver changes any registers
15030 * (the transition will reset the registers).
15031 *
15032 * In particular, place this call after:
15033 * - init_chip() - the chip will not initiate any PCIe transactions
15034 * - pcie_speeds() - reads the current link speed
15035 * - hfi1_firmware_init() - the needed firmware is ready to be
15036 * downloaded
15037 */
15038 ret = do_pcie_gen3_transition(dd);
15039 if (ret)
15040 goto bail_cleanup;
15041
15042 /* start setting dd values and adjusting CSRs */
15043 init_early_variables(dd);
15044
15045 parse_platform_config(dd);
15046
Dean Luick5d9157a2015-11-16 21:59:34 -050015047 ret = obtain_boardname(dd);
15048 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040015049 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040015050
15051 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050015052 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040015053 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040015054 (u32)dd->majrev,
15055 (u32)dd->minrev,
15056 (dd->revision >> CCE_REVISION_SW_SHIFT)
15057 & CCE_REVISION_SW_MASK);
15058
15059 ret = set_up_context_variables(dd);
15060 if (ret)
15061 goto bail_cleanup;
15062
15063 /* set initial RXE CSRs */
15064 init_rxe(dd);
15065 /* set initial TXE CSRs */
15066 init_txe(dd);
15067 /* set initial non-RXE, non-TXE CSRs */
15068 init_other(dd);
15069 /* set up KDETH QP prefix in both RX and TX CSRs */
15070 init_kdeth_qp(dd);
15071
Dennis Dalessandro41973442016-07-25 07:52:36 -070015072 ret = hfi1_dev_affinity_init(dd);
15073 if (ret)
15074 goto bail_cleanup;
Mitko Haralanov957558c2016-02-03 14:33:40 -080015075
Mike Marciniszyn77241052015-07-30 15:17:43 -040015076 /* send contexts must be set up before receive contexts */
15077 ret = init_send_contexts(dd);
15078 if (ret)
15079 goto bail_cleanup;
15080
Michael J. Ruhlf2a3bc02017-08-04 13:52:38 -070015081 ret = hfi1_create_kctxts(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015082 if (ret)
15083 goto bail_cleanup;
15084
Michael J. Ruhlf2a3bc02017-08-04 13:52:38 -070015085 /*
15086 * Initialize aspm, to be done after gen3 transition and setting up
15087 * contexts and before enabling interrupts
15088 */
15089 aspm_init(dd);
15090
Mike Marciniszyn77241052015-07-30 15:17:43 -040015091 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
15092 /*
15093 * rcd[0] is guaranteed to be valid by this point. Also, all
15094 * contexts are using the same value, as per the module parameter.
15095 */
15096 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
15097
15098 ret = init_pervl_scs(dd);
15099 if (ret)
15100 goto bail_cleanup;
15101
15102 /* sdma init */
15103 for (i = 0; i < dd->num_pports; ++i) {
15104 ret = sdma_init(dd, i);
15105 if (ret)
15106 goto bail_cleanup;
15107 }
15108
Michael J. Ruhlf2a3bc02017-08-04 13:52:38 -070015109 /* use contexts created by hfi1_create_kctxts */
Mike Marciniszyn77241052015-07-30 15:17:43 -040015110 ret = set_up_interrupts(dd);
15111 if (ret)
15112 goto bail_cleanup;
15113
15114 /* set up LCB access - must be after set_up_interrupts() */
15115 init_lcb_access(dd);
15116
Ira Weinyfc0b76c2016-07-27 21:09:40 -040015117 /*
15118 * Serial number is created from the base guid:
15119 * [27:24] = base guid [38:35]
15120 * [23: 0] = base guid [23: 0]
15121 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040015122 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
Ira Weinyfc0b76c2016-07-27 21:09:40 -040015123 (dd->base_guid & 0xFFFFFF) |
15124 ((dd->base_guid >> 11) & 0xF000000));
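	/*
	 * Illustrative note on the shift above (not part of the original
	 * source): right shifting by 11 relocates base guid bits [38:35] down
	 * to bits [27:24] (35 - 11 = 24, 38 - 11 = 27), and the 0xF000000 mask
	 * keeps exactly that nibble, matching the layout described in the
	 * comment above.
	 */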
Mike Marciniszyn77241052015-07-30 15:17:43 -040015125
15126 dd->oui1 = dd->base_guid >> 56 & 0xFF;
15127 dd->oui2 = dd->base_guid >> 48 & 0xFF;
15128 dd->oui3 = dd->base_guid >> 40 & 0xFF;
15129
15130 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15131 if (ret)
15132 goto bail_clear_intr;
Mike Marciniszyn77241052015-07-30 15:17:43 -040015133
15134 thermal_init(dd);
15135
15136 ret = init_cntrs(dd);
15137 if (ret)
15138 goto bail_clear_intr;
15139
15140 ret = init_rcverr(dd);
15141 if (ret)
15142 goto bail_free_cntrs;
15143
Tadeusz Strukacd7c8f2016-10-25 08:57:55 -070015144 init_completion(&dd->user_comp);
15145
15146 /* The user refcount starts with one to indicate an active device */
15147 atomic_set(&dd->user_refcount, 1);
15148
Mike Marciniszyn77241052015-07-30 15:17:43 -040015149 goto bail;
15150
15151bail_free_rcverr:
15152 free_rcverr(dd);
15153bail_free_cntrs:
15154 free_cntrs(dd);
15155bail_clear_intr:
15156 clean_up_interrupts(dd);
15157bail_cleanup:
15158 hfi1_pcie_ddcleanup(dd);
15159bail_free:
15160 hfi1_free_devdata(dd);
15161 dd = ERR_PTR(ret);
15162bail:
15163 return dd;
15164}
15165
15166static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15167 u32 dw_len)
15168{
15169 u32 delta_cycles;
15170 u32 current_egress_rate = ppd->current_egress_rate;
15171 /* rates here are in units of 10^6 bits/sec */
15172
15173 if (desired_egress_rate == -1)
15174 return 0; /* shouldn't happen */
15175
15176 if (desired_egress_rate >= current_egress_rate)
15177 return 0; /* we can't help it go faster, only slower */
15178
15179 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15180 egress_cycles(dw_len * 4, current_egress_rate);
15181
15182 return (u16)delta_cycles;
15183}
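/*
 * Illustrative example (values assumed, and assuming egress_cycles() scales
 * inversely with the rate): with a current egress rate of 100000 Mb/s and a
 * desired static rate of 25000 Mb/s, sending the same dw_len * 4 bytes takes
 * about four times as many cycles at the slower rate, so the returned delay
 * is roughly three times the cycle count at the current rate.
 */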
15184
Mike Marciniszyn77241052015-07-30 15:17:43 -040015185/**
15186 * create_pbc - build a pbc for transmission
15187 * @flags: special case flags or-ed in built pbc
15188 * @srate_mbs: static rate (in units of 10^6 bits/sec)
15189 * @vl: vl
15190 * @dw_len: dword length (header words + data words + pbc words)
15191 *
15192 * Create a PBC with the given flags, rate, VL, and length.
15193 *
15194 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15195 * for verbs, which does not use this PSM feature. The lone other caller
15196 * is for the diagnostic interface which calls this if the user does not
15197 * supply their own PBC.
15198 */
15199u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15200 u32 dw_len)
15201{
15202 u64 pbc, delay = 0;
15203
15204 if (unlikely(srate_mbs))
15205 delay = delay_cycles(ppd, srate_mbs, dw_len);
15206
15207 pbc = flags
15208 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15209 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15210 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15211 | (dw_len & PBC_LENGTH_DWS_MASK)
15212 << PBC_LENGTH_DWS_SHIFT;
15213
15214 return pbc;
15215}
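/*
 * Minimal usage sketch (hypothetical, for illustration only): a caller that
 * needs no special flags and no static rate throttling could build a PBC for
 * a dw_len-dword packet on virtual lane vl as:
 *
 *	u64 pbc = create_pbc(ppd, 0, 0, vl, dw_len);
 *
 * With srate_mbs == 0 the delay field stays 0, and only the VL, the DW
 * length, and PBC_IHCRC_NONE are encoded along with any flags (none here).
 */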
15216
15217#define SBUS_THERMAL 0x4f
15218#define SBUS_THERM_MONITOR_MODE 0x1
15219
15220#define THERM_FAILURE(dev, ret, reason) \
15221 dd_dev_err((dd), \
15222 "Thermal sensor initialization failed: %s (%d)\n", \
15223 (reason), (ret))
15224
15225/*
Jakub Pawlakcde10af2016-05-12 10:23:35 -070015226 * Initialize the thermal sensor.
Mike Marciniszyn77241052015-07-30 15:17:43 -040015227 *
15228 * After initialization, enable polling of the thermal sensor through
15229 * the SBus interface. For this to work, the SBus Master
15230 * firmware has to be loaded, because the HW polling
15231 * logic uses SBus interrupts, which are not supported with the
15232 * default firmware. Otherwise, no data will be returned through
15233 * the ASIC_STS_THERM CSR.
15234 */
15235static int thermal_init(struct hfi1_devdata *dd)
15236{
15237 int ret = 0;
15238
15239 if (dd->icode != ICODE_RTL_SILICON ||
Dean Luicka4536982016-03-05 08:50:11 -080015240 check_chip_resource(dd, CR_THERM_INIT, NULL))
Mike Marciniszyn77241052015-07-30 15:17:43 -040015241 return ret;
15242
Dean Luick576531f2016-03-05 08:50:01 -080015243 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15244 if (ret) {
15245 THERM_FAILURE(dd, ret, "Acquire SBus");
15246 return ret;
15247 }
15248
Mike Marciniszyn77241052015-07-30 15:17:43 -040015249 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050015250 /* Disable polling of thermal readings */
15251 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15252 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015253 /* Thermal Sensor Initialization */
15254 /* Step 1: Reset the Thermal SBus Receiver */
15255 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15256 RESET_SBUS_RECEIVER, 0);
15257 if (ret) {
15258 THERM_FAILURE(dd, ret, "Bus Reset");
15259 goto done;
15260 }
15261 /* Step 2: Set Reset bit in Thermal block */
15262 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15263 WRITE_SBUS_RECEIVER, 0x1);
15264 if (ret) {
15265 THERM_FAILURE(dd, ret, "Therm Block Reset");
15266 goto done;
15267 }
15268 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
15269 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15270 WRITE_SBUS_RECEIVER, 0x32);
15271 if (ret) {
15272 THERM_FAILURE(dd, ret, "Write Clock Div");
15273 goto done;
15274 }
15275 /* Step 4: Select temperature mode */
15276 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15277 WRITE_SBUS_RECEIVER,
15278 SBUS_THERM_MONITOR_MODE);
15279 if (ret) {
15280 THERM_FAILURE(dd, ret, "Write Mode Sel");
15281 goto done;
15282 }
15283 /* Step 5: De-assert block reset and start conversion */
15284 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15285 WRITE_SBUS_RECEIVER, 0x2);
15286 if (ret) {
15287 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15288 goto done;
15289 }
15290 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
15291 msleep(22);
15292
15293 /* Enable polling of thermal readings */
15294 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
Dean Luicka4536982016-03-05 08:50:11 -080015295
15296 /* Set initialized flag */
15297 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15298 if (ret)
15299 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15300
Mike Marciniszyn77241052015-07-30 15:17:43 -040015301done:
Dean Luick576531f2016-03-05 08:50:01 -080015302 release_chip_resource(dd, CR_SBUS);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015303 return ret;
15304}
15305
15306static void handle_temp_err(struct hfi1_devdata *dd)
15307{
15308 struct hfi1_pportdata *ppd = &dd->pport[0];
15309 /*
15310 * Thermal Critical Interrupt
15311 * Put the device into forced freeze mode, take link down to
15312 * offline, and put DC into reset.
15313 */
15314 dd_dev_emerg(dd,
15315 "Critical temperature reached! Forcing device into freeze mode!\n");
15316 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080015317 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015318 /*
15319 * Shut DC down as much and as quickly as possible.
15320 *
15321 * Step 1: Take the link down to OFFLINE. This will cause the
15322 * 8051 to put the Serdes in reset. However, we don't want to
15323 * go through the entire link state machine since we want to
15324 * shutdown ASAP. Furthermore, this is not a graceful shutdown
15325 * but rather an attempt to save the chip.
15326 * Code below is almost the same as quiet_serdes() but avoids
15327 * all the extra work and the sleeps.
15328 */
15329 ppd->driver_link_ready = 0;
15330 ppd->link_enabled = 0;
Harish Chegondibf640092016-03-05 08:49:29 -080015331 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15332 PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015333 /*
15334 * Step 2: Shutdown LCB and 8051
15335 * After shutdown, do not restore DC_CFG_RESET value.
15336 */
15337 dc_shutdown(dd);
15338}