/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
        u64 flag;       /* the flag */
        char *str;      /* description string */
        u16 extra;      /* extra information */
        u16 unused0;
        u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
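/*
 * For illustration only (not a real table entry): FLAG_ENTRY0("SomeErr",
 * SOME_SMASK) expands to the initializer { SOME_SMASK, "SomeErr", 0 },
 * i.e. a flag_table entry with no extra information.
 */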

/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields */

/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
/* QPN[7..1] */
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
        num, \
        sc0, sc0val, \
        sc1, sc1val, \
        sc2, sc2val, \
        sc3, sc3val, \
        sc4, sc4val, \
        sc5, sc5val, \
        sc6, sc6val, \
        sc7, sc7val) \
( \
        ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
        ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
        ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
        ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
        ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
        ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
        ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
        ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
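/*
 * Illustrative use of SC2VL_VAL (a made-up mapping, not the driver's
 * default): SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7)
 * ORs each SCn value into its SEND_SC2VLT0_SCn_SHIFT position to build
 * the 64-bit power-on value for the SendSC2VLT0 CSR.
 */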

#define DC_SC_VL_VAL( \
        range, \
        e0, e0val, \
        e1, e1val, \
        e2, e2val, \
        e3, e3val, \
        e4, e4val, \
        e5, e5val, \
        e6, e6val, \
        e7, e7val, \
        e8, e8val, \
        e9, e9val, \
        e10, e10val, \
        e11, e11val, \
        e12, e12val, \
        e13, e13val, \
        e14, e14val, \
        e15, e15val) \
( \
        ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
        ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
        ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
        ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
        ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
        ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
        ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
        ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
        ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
        ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
        ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
        ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
        ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
        ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
        ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
        ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
                        | CCE_STATUS_RXE_FROZE_SMASK \
                        | CCE_STATUS_TXE_FROZE_SMASK \
                        | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
                        | CCE_STATUS_TXE_PAUSED_SMASK \
                        | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
                CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
                CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
                CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
                CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
                CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
                CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
                CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
                CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
                CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
                CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
                CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
                CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
                CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
                CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
                CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
                CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
                CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
                CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
                CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
                CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
                CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
                CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
                CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
                CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
                CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
                CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/ FLAG_ENTRY0("LATriggered",
                CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
                CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
                CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
                CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
                CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
                CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
                CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
                CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
                CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
        SEC_WRITE_DROPPED,
        SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY("PioCsrParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/ FLAG_ENTRY("PioPccFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY("PioPecFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY("PioSmPktResetParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
        0,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
        0,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/ FLAG_ENTRY("PioInitSmIn",
        0,
        SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
        0,
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/ FLAG_ENTRY("PioWriteDataParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/ FLAG_ENTRY("PioStateMachine",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/ FLAG_ENTRY("PioVlfSopParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/ FLAG_ENTRY("PioVlFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY("PioPpmcSopLen",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
        (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
                SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
                SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
        (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
        | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
        | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
        (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
        | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
        | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
                SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
                SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
                SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
                SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
                SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
                SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
                SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
                SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
                SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
                SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
                SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
                SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
                SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
                SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
                SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
                SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
                SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
                SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
                SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
                SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
                SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
                SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
                SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
                SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
                SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
                SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
                SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
                SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
                SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
                SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
                SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
        (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
        | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
        | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
        | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
        | SEES(TX_LAUNCH_CSR_PARITY) \
        | SEES(TX_SBRD_CTL_CSR_PARITY) \
        | SEES(TX_CONFIG_PARITY) \
        | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
        | SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("InconsistentSop",
                SEC_PACKET_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/ FLAG_ENTRY("DisallowedPacket",
                SEC_PACKET_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
                SEC_WRITE_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("WriteOverflow",
                SEC_WRITE_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
                SEC_WRITE_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
                RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
                RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
                RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
                RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
                RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
                RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
                RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
                RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
                RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
                RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
                RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
                RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
        (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
        (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
        FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
        FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
        FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
        FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
        FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
        FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
        FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
        FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
        FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
        FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
        FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
        FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
        FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
        FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
        FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
        FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
        FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
        FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
        FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
        FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
        FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
        FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
        FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
        FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
        FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
        FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
        FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
        FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
        FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
        FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
        FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
        FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
        FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
        FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
        FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
        FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
        FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
        FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
                LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
                LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
                LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
                LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
                LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
                LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
        FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
        FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
        FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
        FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
        FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
        FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
        FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
        FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
        FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
                    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
        FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
        FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
        FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
        FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
        FLAG_ENTRY0("Serdes internal loopback failure",
                    FAILED_SERDES_INTERNAL_LOOPBACK),
        FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
        FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
        FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
        FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
        FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
        FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
        FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
        FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
        FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT)
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
        FLAG_ENTRY0("Host request done", 0x0001),
        FLAG_ENTRY0("BC SMA message", 0x0002),
        FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
        FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
        FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
        FLAG_ENTRY0("External device config request", 0x0020),
        FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
        FLAG_ENTRY0("LinkUp achieved", 0x0080),
        FLAG_ENTRY0("Link going down", 0x0100),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
                               u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
                                      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
                                     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
                                  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
                            u8 *tx_polarity_inversion,
                            u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
                                unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
                                          u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
                           u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
                                  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
                           unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);

/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt register.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
        u32 status;             /* status CSR offset */
        u32 clear;              /* clear CSR offset */
        u32 mask;               /* mask CSR offset */
        void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
        const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
        { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
        handler, desc }
#define DC_EE1(reg, handler, desc) \
        { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
        { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }

/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/ { 0, 0, 0, NULL }, /* reserved */
/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
        /* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
        EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
        /* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7

/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
        /* the rest are reserved */
};

struct cntr_entry {
        /*
         * counter name
         */
        char *name;

        /*
         * csr to read for name (if applicable)
         */
        u64 csr;

        /*
         * offset into dd or ppd to store the counter's value
         */
        int offset;

        /*
         * flags
         */
        u8 flags;

        /*
         * accessor for stat element, context either dd or ppd
         */
        u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
                       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
        name, \
        csr, \
        offset, \
        flags, \
        accessor \
}

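/*
 * The *_CNTR_ELEM() helpers below build cntr_entry initializers; each
 * hardware counter's CSR address is computed as the counter index times 8
 * (bytes per 64-bit CSR) added to the base of its counter array.
 */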
1173/* 32bit RXE */
1174#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1175CNTR_ELEM(#name, \
1176 (counter * 8 + RCV_COUNTER_ARRAY32), \
1177 0, flags | CNTR_32BIT, \
1178 port_access_u32_csr)
1179
1180#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1181CNTR_ELEM(#name, \
1182 (counter * 8 + RCV_COUNTER_ARRAY32), \
1183 0, flags | CNTR_32BIT, \
1184 dev_access_u32_csr)
1185
1186/* 64bit RXE */
1187#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1188CNTR_ELEM(#name, \
1189 (counter * 8 + RCV_COUNTER_ARRAY64), \
1190 0, flags, \
1191 port_access_u64_csr)
1192
1193#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1194CNTR_ELEM(#name, \
1195 (counter * 8 + RCV_COUNTER_ARRAY64), \
1196 0, flags, \
1197 dev_access_u64_csr)
1198
1199#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1200#define OVR_ELM(ctx) \
1201CNTR_ELEM("RcvHdrOvr" #ctx, \
Jubin John8638b772016-02-14 20:19:24 -08001202 (RCV_HDR_OVFL_CNT + ctx * 0x100), \
Mike Marciniszyn77241052015-07-30 15:17:43 -04001203 0, CNTR_NORMAL, port_access_u64_csr)
1204
1205/* 32bit TXE */
1206#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1207CNTR_ELEM(#name, \
1208 (counter * 8 + SEND_COUNTER_ARRAY32), \
1209 0, flags | CNTR_32BIT, \
1210 port_access_u32_csr)
1211
1212/* 64bit TXE */
1213#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1214CNTR_ELEM(#name, \
1215 (counter * 8 + SEND_COUNTER_ARRAY64), \
1216 0, flags, \
1217 port_access_u64_csr)
1218
1219# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1220CNTR_ELEM(#name,\
1221 counter * 8 + SEND_COUNTER_ARRAY64, \
1222 0, \
1223 flags, \
1224 dev_access_u64_csr)
1225
1226/* CCE */
1227#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1228CNTR_ELEM(#name, \
1229 (counter * 8 + CCE_COUNTER_ARRAY32), \
1230 0, flags | CNTR_32BIT, \
1231 dev_access_u32_csr)
1232
1233#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1234CNTR_ELEM(#name, \
1235 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1236 0, flags | CNTR_32BIT, \
1237 dev_access_u32_csr)
1238
1239/* DC */
1240#define DC_PERF_CNTR(name, counter, flags) \
1241CNTR_ELEM(#name, \
1242 counter, \
1243 0, \
1244 flags, \
1245 dev_access_u64_csr)
1246
1247#define DC_PERF_CNTR_LCB(name, counter, flags) \
1248CNTR_ELEM(#name, \
1249 counter, \
1250 0, \
1251 flags, \
1252 dc_access_lcb_cntr)
1253
1254/* ibp counters */
1255#define SW_IBP_CNTR(name, cntr) \
1256CNTR_ELEM(#name, \
1257 0, \
1258 0, \
1259 CNTR_SYNTH, \
1260 access_ibp_##cntr)
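
/*
 * Example only: the macro families above are meant for building the counter
 * tables later in this file.  A hypothetical table fragment (counter names
 * and indices invented for illustration) might look like:
 *
 *	static struct cntr_entry example_cntrs[] = {
 *		RXE32_DEV_CNTR_ELEM(RxExample, 0, CNTR_NORMAL),
 *		TXE64_PORT_CNTR_ELEM(TxExample, 1, CNTR_VL),
 *		SW_IBP_CNTR(ExampleIbp, example),
 *	};
 *
 * Each RXE/TXE/CCE entry derives its CSR address from the counter index
 * (index * 8 plus the relevant array base) and binds the matching dev- or
 * port-level accessor; SW_IBP_CNTR entries are synthetic and resolve to an
 * access_ibp_<name> accessor instead of a CSR.
 */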
1261
1262u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1263{
1264 if (dd->flags & HFI1_PRESENT) {
1265 return readq((void __iomem *)dd->kregbase + offset);
1266 }
1267 return -1;
1268}
1269
1270void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1271{
1272 if (dd->flags & HFI1_PRESENT)
1273 writeq(value, (void __iomem *)dd->kregbase + offset);
1274}
1275
1276void __iomem *get_csr_addr(
1277 struct hfi1_devdata *dd,
1278 u32 offset)
1279{
1280 return (void __iomem *)dd->kregbase + offset;
1281}
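
/*
 * Example only (hedged sketch, not used elsewhere in the driver): a simple
 * read-modify-write built from the read_csr()/write_csr() accessors above.
 * Note that read_csr() returns -1 (all ones) when the device is not present,
 * and write_csr() silently drops the write in that case.
 */
static inline void example_csr_set_bits(struct hfi1_devdata *dd, u32 offset,
					u64 bits)
{
	u64 reg = read_csr(dd, offset);

	write_csr(dd, offset, reg | bits);
}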
1282
1283static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1284 int mode, u64 value)
1285{
1286 u64 ret;
1287
1288 if (mode == CNTR_MODE_R) {
1289 ret = read_csr(dd, csr);
1290 } else if (mode == CNTR_MODE_W) {
1291 write_csr(dd, csr, value);
1292 ret = value;
1293 } else {
1294 dd_dev_err(dd, "Invalid cntr register access mode");
1295 return 0;
1296 }
1297
1298 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1299 return ret;
1300}
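
/*
 * Example only: the CNTR_MODE_R/CNTR_MODE_W convention implemented by
 * read_write_csr() above is what every rw_cntr accessor below relies on.
 * A "read then reset" of a writable counter CSR (csr is a placeholder
 * offset) would look like this hedged sketch:
 */
static inline u64 example_read_then_clear(const struct hfi1_devdata *dd,
					  u32 csr)
{
	u64 val = read_write_csr(dd, csr, CNTR_MODE_R, 0);

	/* write 0 back through the same helper to reset the counter */
	read_write_csr(dd, csr, CNTR_MODE_W, 0);
	return val;
}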
1301
1302/* Dev Access */
1303static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1304 void *context, int vl, int mode, u64 data)
1305{
1306 struct hfi1_devdata *dd = context;
1307 u64 csr = entry->csr;
1308
1309 if (entry->flags & CNTR_SDMA) {
1310 if (vl == CNTR_INVALID_VL)
1311 return 0;
1312 csr += 0x100 * vl;
1313 } else {
1314 if (vl != CNTR_INVALID_VL)
1315 return 0;
1316 }
1317 return read_write_csr(dd, csr, mode, data);
1318}
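
/*
 * Worked example: per-SDMA-engine counters (CNTR_SDMA) are laid out at a
 * 0x100-byte stride, and the vl argument carries the engine index, so for a
 * counter whose base CSR were 0x2000 (hypothetical value) engine 3 would be
 * read at 0x2000 + 3 * 0x100 = 0x2300.  Counters without CNTR_SDMA accept
 * only vl == CNTR_INVALID_VL and read the base CSR directly.
 */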
1319
1320static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1321 void *context, int idx, int mode, u64 data)
1322{
1323 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1324
1325 if (dd->per_sdma && idx < dd->num_sdma)
1326 return dd->per_sdma[idx].err_cnt;
1327 return 0;
1328}
1329
1330static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1331 void *context, int idx, int mode, u64 data)
1332{
1333 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1334
1335 if (dd->per_sdma && idx < dd->num_sdma)
1336 return dd->per_sdma[idx].sdma_int_cnt;
1337 return 0;
1338}
1339
1340static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1341 void *context, int idx, int mode, u64 data)
1342{
1343 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1344
1345 if (dd->per_sdma && idx < dd->num_sdma)
1346 return dd->per_sdma[idx].idle_int_cnt;
1347 return 0;
1348}
1349
1350static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1351 void *context, int idx, int mode,
1352 u64 data)
1353{
1354 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1355
1356 if (dd->per_sdma && idx < dd->num_sdma)
1357 return dd->per_sdma[idx].progress_int_cnt;
1358 return 0;
1359}
1360
1361static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1362 int vl, int mode, u64 data)
1363{
1364 struct hfi1_devdata *dd = context;
1365
1366 u64 val = 0;
1367 u64 csr = entry->csr;
1368
1369 if (entry->flags & CNTR_VL) {
1370 if (vl == CNTR_INVALID_VL)
1371 return 0;
1372 csr += 8 * vl;
1373 } else {
1374 if (vl != CNTR_INVALID_VL)
1375 return 0;
1376 }
1377
1378 val = read_write_csr(dd, csr, mode, data);
1379 return val;
1380}
1381
1382static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1383 int vl, int mode, u64 data)
1384{
1385 struct hfi1_devdata *dd = context;
1386 u32 csr = entry->csr;
1387 int ret = 0;
1388
1389 if (vl != CNTR_INVALID_VL)
1390 return 0;
1391 if (mode == CNTR_MODE_R)
1392 ret = read_lcb_csr(dd, csr, &data);
1393 else if (mode == CNTR_MODE_W)
1394 ret = write_lcb_csr(dd, csr, data);
1395
1396 if (ret) {
1397 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1398 return 0;
1399 }
1400
1401 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1402 return data;
1403}
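
/*
 * Note (example-level commentary): unlike plain CSR counters, LCB counters go
 * through read_lcb_csr()/write_lcb_csr(), which can fail if the LCB registers
 * are not currently accessible; in that case the accessor above logs the
 * failure and reports 0 rather than propagating an error to the counter code.
 */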
1404
1405/* Port Access */
1406static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1407 int vl, int mode, u64 data)
1408{
1409 struct hfi1_pportdata *ppd = context;
1410
1411 if (vl != CNTR_INVALID_VL)
1412 return 0;
1413 return read_write_csr(ppd->dd, entry->csr, mode, data);
1414}
1415
1416static u64 port_access_u64_csr(const struct cntr_entry *entry,
1417 void *context, int vl, int mode, u64 data)
1418{
1419 struct hfi1_pportdata *ppd = context;
1420 u64 val;
1421 u64 csr = entry->csr;
1422
1423 if (entry->flags & CNTR_VL) {
1424 if (vl == CNTR_INVALID_VL)
1425 return 0;
1426 csr += 8 * vl;
1427 } else {
1428 if (vl != CNTR_INVALID_VL)
1429 return 0;
1430 }
1431 val = read_write_csr(ppd->dd, csr, mode, data);
1432 return val;
1433}
1434
1435/* Software defined */
1436static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1437 u64 data)
1438{
1439 u64 ret;
1440
1441 if (mode == CNTR_MODE_R) {
1442 ret = *cntr;
1443 } else if (mode == CNTR_MODE_W) {
1444 *cntr = data;
1445 ret = data;
1446 } else {
1447 dd_dev_err(dd, "Invalid cntr sw access mode");
1448 return 0;
1449 }
1450
1451 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1452
1453 return ret;
1454}
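
/*
 * Example only (hedged sketch): reading and then zeroing one of the
 * driver-maintained software counters through read_write_sw().  The
 * ppd->link_downed field is the same counter used by
 * access_sw_link_dn_cnt() below.
 */
static inline u64 example_clear_link_downed(struct hfi1_pportdata *ppd)
{
	u64 old = read_write_sw(ppd->dd, &ppd->link_downed, CNTR_MODE_R, 0);

	read_write_sw(ppd->dd, &ppd->link_downed, CNTR_MODE_W, 0);
	return old;
}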
1455
1456static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1457 int vl, int mode, u64 data)
1458{
1459 struct hfi1_pportdata *ppd = context;
1460
1461 if (vl != CNTR_INVALID_VL)
1462 return 0;
1463 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1464}
1465
1466static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1467 int vl, int mode, u64 data)
1468{
1469 struct hfi1_pportdata *ppd = context;
1470
1471 if (vl != CNTR_INVALID_VL)
1472 return 0;
1473 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1474}
1475
1476static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1477 void *context, int vl, int mode,
1478 u64 data)
1479{
1480 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1481
1482 if (vl != CNTR_INVALID_VL)
1483 return 0;
1484 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1485}
1486
1487static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1488 void *context, int vl, int mode, u64 data)
1489{
1490 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1491 u64 zero = 0;
1492 u64 *counter;
1493
1494 if (vl == CNTR_INVALID_VL)
1495 counter = &ppd->port_xmit_discards;
1496 else if (vl >= 0 && vl < C_VL_COUNT)
1497 counter = &ppd->port_xmit_discards_vl[vl];
1498 else
1499 counter = &zero;
1500
1501 return read_write_sw(ppd->dd, counter, mode, data);
1502}
1503
1504static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1505 void *context, int vl, int mode,
1506 u64 data)
1507{
1508 struct hfi1_pportdata *ppd = context;
1509
1510 if (vl != CNTR_INVALID_VL)
1511 return 0;
1512
1513 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1514 mode, data);
1515}
1516
1517static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1518 void *context, int vl, int mode, u64 data)
1519{
1520 struct hfi1_pportdata *ppd = context;
1521
1522 if (vl != CNTR_INVALID_VL)
1523 return 0;
1524
1525 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1526 mode, data);
1527}
1528
1529u64 get_all_cpu_total(u64 __percpu *cntr)
1530{
1531 int cpu;
1532 u64 counter = 0;
1533
1534 for_each_possible_cpu(cpu)
1535 counter += *per_cpu_ptr(cntr, cpu);
1536 return counter;
1537}
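
/*
 * Example only (hedged sketch): get_all_cpu_total() pairs with per-CPU
 * counters allocated via alloc_percpu(u64).  Writers increment their local
 * copy without locking; readers pay the cross-CPU summation cost here.
 */
static inline u64 example_percpu_count(u64 __percpu *cntr)
{
	this_cpu_inc(*cntr);		/* cheap, per-CPU, lock free */
	return get_all_cpu_total(cntr);	/* sums every CPU's copy */
}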
1538
1539static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1540 u64 __percpu *cntr,
1541 int vl, int mode, u64 data)
1542{
1543 u64 ret = 0;
1544
1545 if (vl != CNTR_INVALID_VL)
1546 return 0;
1547
1548 if (mode == CNTR_MODE_R) {
1549 ret = get_all_cpu_total(cntr) - *z_val;
1550 } else if (mode == CNTR_MODE_W) {
1551 /* A write can only zero the counter */
1552 if (data == 0)
1553 *z_val = get_all_cpu_total(cntr);
1554 else
1555 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1556 } else {
1557 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1558 return 0;
1559 }
1560
1561 return ret;
1562}
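
/*
 * Example only (hedged sketch): read_write_cpu() never resets the underlying
 * per-CPU counters; "zeroing" just snapshots the current total into *z_val so
 * subsequent reads report the delta.  Equivalent open-coded sequence, using
 * the interrupt counter that access_sw_cpu_intr() below operates on:
 */
static inline void example_zero_then_read_intr(struct hfi1_devdata *dd)
{
	/* zero: record the running total as the new baseline */
	read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
		       CNTR_INVALID_VL, CNTR_MODE_W, 0);
	/* read: total minus baseline, i.e. 0 right after the zero above */
	(void)read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
			     CNTR_INVALID_VL, CNTR_MODE_R, 0);
}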
1563
1564static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1565 void *context, int vl, int mode, u64 data)
1566{
1567 struct hfi1_devdata *dd = context;
1568
1569 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1570 mode, data);
1571}
1572
1573static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1574 void *context, int vl, int mode, u64 data)
1575{
1576 struct hfi1_devdata *dd = context;
1577
1578 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1579 mode, data);
1580}
1581
1582static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1583 void *context, int vl, int mode, u64 data)
1584{
1585 struct hfi1_devdata *dd = context;
1586
1587 return dd->verbs_dev.n_piowait;
1588}
1589
1590static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1591 void *context, int vl, int mode, u64 data)
1592{
1593 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1594
1595 return dd->verbs_dev.n_piodrain;
1596}
1597
1598static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1599 void *context, int vl, int mode, u64 data)
1600{
1601 struct hfi1_devdata *dd = context;
1602
1603 return dd->verbs_dev.n_txwait;
1604}
1605
1606static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1607 void *context, int vl, int mode, u64 data)
1608{
1609 struct hfi1_devdata *dd = context;
1610
1611 return dd->verbs_dev.n_kmem_wait;
1612}
1613
1614static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1615 void *context, int vl, int mode, u64 data)
1616{
1617 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1618
1619 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1620 mode, data);
1621}
1622
1623/* Software counters for the error status bits within MISC_ERR_STATUS */
1624static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1625 void *context, int vl, int mode,
1626 u64 data)
1627{
1628 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1629
1630 return dd->misc_err_status_cnt[12];
1631}
1632
1633static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1634 void *context, int vl, int mode,
1635 u64 data)
1636{
1637 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1638
1639 return dd->misc_err_status_cnt[11];
1640}
1641
1642static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1643 void *context, int vl, int mode,
1644 u64 data)
1645{
1646 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1647
1648 return dd->misc_err_status_cnt[10];
1649}
1650
1651static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1652 void *context, int vl,
1653 int mode, u64 data)
1654{
1655 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1656
1657 return dd->misc_err_status_cnt[9];
1658}
1659
1660static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1661 void *context, int vl, int mode,
1662 u64 data)
1663{
1664 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1665
1666 return dd->misc_err_status_cnt[8];
1667}
1668
1669static u64 access_misc_efuse_read_bad_addr_err_cnt(
1670 const struct cntr_entry *entry,
1671 void *context, int vl, int mode, u64 data)
1672{
1673 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1674
1675 return dd->misc_err_status_cnt[7];
1676}
1677
1678static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1679 void *context, int vl,
1680 int mode, u64 data)
1681{
1682 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1683
1684 return dd->misc_err_status_cnt[6];
1685}
1686
1687static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1688 void *context, int vl, int mode,
1689 u64 data)
1690{
1691 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1692
1693 return dd->misc_err_status_cnt[5];
1694}
1695
1696static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1697 void *context, int vl, int mode,
1698 u64 data)
1699{
1700 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1701
1702 return dd->misc_err_status_cnt[4];
1703}
1704
1705static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1706 void *context, int vl,
1707 int mode, u64 data)
1708{
1709 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1710
1711 return dd->misc_err_status_cnt[3];
1712}
1713
1714static u64 access_misc_csr_write_bad_addr_err_cnt(
1715 const struct cntr_entry *entry,
1716 void *context, int vl, int mode, u64 data)
1717{
1718 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1719
1720 return dd->misc_err_status_cnt[2];
1721}
1722
1723static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1724 void *context, int vl,
1725 int mode, u64 data)
1726{
1727 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1728
1729 return dd->misc_err_status_cnt[1];
1730}
1731
1732static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1733 void *context, int vl, int mode,
1734 u64 data)
1735{
1736 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1737
1738 return dd->misc_err_status_cnt[0];
1739}
1740
1741/*
1742 * Software counter for the aggregate of
1743 * individual CceErrStatus counters
1744 */
1745static u64 access_sw_cce_err_status_aggregated_cnt(
1746 const struct cntr_entry *entry,
1747 void *context, int vl, int mode, u64 data)
1748{
1749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1750
1751 return dd->sw_cce_err_status_aggregate;
1752}
1753
1754/*
1755 * Software counters corresponding to each of the
1756 * error status bits within CceErrStatus
1757 */
1758static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1759 void *context, int vl, int mode,
1760 u64 data)
1761{
1762 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1763
1764 return dd->cce_err_status_cnt[40];
1765}
1766
1767static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1768 void *context, int vl, int mode,
1769 u64 data)
1770{
1771 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1772
1773 return dd->cce_err_status_cnt[39];
1774}
1775
1776static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1777 void *context, int vl, int mode,
1778 u64 data)
1779{
1780 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1781
1782 return dd->cce_err_status_cnt[38];
1783}
1784
1785static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1786 void *context, int vl, int mode,
1787 u64 data)
1788{
1789 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1790
1791 return dd->cce_err_status_cnt[37];
1792}
1793
1794static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1795 void *context, int vl, int mode,
1796 u64 data)
1797{
1798 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1799
1800 return dd->cce_err_status_cnt[36];
1801}
1802
1803static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1804 const struct cntr_entry *entry,
1805 void *context, int vl, int mode, u64 data)
1806{
1807 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1808
1809 return dd->cce_err_status_cnt[35];
1810}
1811
1812static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1813 const struct cntr_entry *entry,
1814 void *context, int vl, int mode, u64 data)
1815{
1816 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1817
1818 return dd->cce_err_status_cnt[34];
1819}
1820
1821static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1822 void *context, int vl,
1823 int mode, u64 data)
1824{
1825 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1826
1827 return dd->cce_err_status_cnt[33];
1828}
1829
1830static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1831 void *context, int vl, int mode,
1832 u64 data)
1833{
1834 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1835
1836 return dd->cce_err_status_cnt[32];
1837}
1838
1839static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1840 void *context, int vl, int mode, u64 data)
1841{
1842 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1843
1844 return dd->cce_err_status_cnt[31];
1845}
1846
1847static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1848 void *context, int vl, int mode,
1849 u64 data)
1850{
1851 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1852
1853 return dd->cce_err_status_cnt[30];
1854}
1855
1856static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1857 void *context, int vl, int mode,
1858 u64 data)
1859{
1860 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1861
1862 return dd->cce_err_status_cnt[29];
1863}
1864
1865static u64 access_pcic_transmit_back_parity_err_cnt(
1866 const struct cntr_entry *entry,
1867 void *context, int vl, int mode, u64 data)
1868{
1869 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1870
1871 return dd->cce_err_status_cnt[28];
1872}
1873
1874static u64 access_pcic_transmit_front_parity_err_cnt(
1875 const struct cntr_entry *entry,
1876 void *context, int vl, int mode, u64 data)
1877{
1878 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1879
1880 return dd->cce_err_status_cnt[27];
1881}
1882
1883static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1884 void *context, int vl, int mode,
1885 u64 data)
1886{
1887 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1888
1889 return dd->cce_err_status_cnt[26];
1890}
1891
1892static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1893 void *context, int vl, int mode,
1894 u64 data)
1895{
1896 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1897
1898 return dd->cce_err_status_cnt[25];
1899}
1900
1901static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1902 void *context, int vl, int mode,
1903 u64 data)
1904{
1905 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1906
1907 return dd->cce_err_status_cnt[24];
1908}
1909
1910static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1911 void *context, int vl, int mode,
1912 u64 data)
1913{
1914 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1915
1916 return dd->cce_err_status_cnt[23];
1917}
1918
1919static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1920 void *context, int vl,
1921 int mode, u64 data)
1922{
1923 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1924
1925 return dd->cce_err_status_cnt[22];
1926}
1927
1928static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1929 void *context, int vl, int mode,
1930 u64 data)
1931{
1932 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1933
1934 return dd->cce_err_status_cnt[21];
1935}
1936
1937static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1938 const struct cntr_entry *entry,
1939 void *context, int vl, int mode, u64 data)
1940{
1941 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1942
1943 return dd->cce_err_status_cnt[20];
1944}
1945
1946static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1947 void *context, int vl,
1948 int mode, u64 data)
1949{
1950 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1951
1952 return dd->cce_err_status_cnt[19];
1953}
1954
1955static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1956 void *context, int vl, int mode,
1957 u64 data)
1958{
1959 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1960
1961 return dd->cce_err_status_cnt[18];
1962}
1963
1964static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1965 void *context, int vl, int mode,
1966 u64 data)
1967{
1968 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1969
1970 return dd->cce_err_status_cnt[17];
1971}
1972
1973static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1974 void *context, int vl, int mode,
1975 u64 data)
1976{
1977 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1978
1979 return dd->cce_err_status_cnt[16];
1980}
1981
1982static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1983 void *context, int vl, int mode,
1984 u64 data)
1985{
1986 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1987
1988 return dd->cce_err_status_cnt[15];
1989}
1990
1991static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1992 void *context, int vl,
1993 int mode, u64 data)
1994{
1995 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1996
1997 return dd->cce_err_status_cnt[14];
1998}
1999
2000static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2001 void *context, int vl, int mode,
2002 u64 data)
2003{
2004 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2005
2006 return dd->cce_err_status_cnt[13];
2007}
2008
2009static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2010 const struct cntr_entry *entry,
2011 void *context, int vl, int mode, u64 data)
2012{
2013 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2014
2015 return dd->cce_err_status_cnt[12];
2016}
2017
2018static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2019 const struct cntr_entry *entry,
2020 void *context, int vl, int mode, u64 data)
2021{
2022 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2023
2024 return dd->cce_err_status_cnt[11];
2025}
2026
2027static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2028 const struct cntr_entry *entry,
2029 void *context, int vl, int mode, u64 data)
2030{
2031 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2032
2033 return dd->cce_err_status_cnt[10];
2034}
2035
2036static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2037 const struct cntr_entry *entry,
2038 void *context, int vl, int mode, u64 data)
2039{
2040 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2041
2042 return dd->cce_err_status_cnt[9];
2043}
2044
2045static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2046 const struct cntr_entry *entry,
2047 void *context, int vl, int mode, u64 data)
2048{
2049 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2050
2051 return dd->cce_err_status_cnt[8];
2052}
2053
2054static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2055 void *context, int vl,
2056 int mode, u64 data)
2057{
2058 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2059
2060 return dd->cce_err_status_cnt[7];
2061}
2062
2063static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2064 const struct cntr_entry *entry,
2065 void *context, int vl, int mode, u64 data)
2066{
2067 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2068
2069 return dd->cce_err_status_cnt[6];
2070}
2071
2072static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2073 void *context, int vl, int mode,
2074 u64 data)
2075{
2076 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2077
2078 return dd->cce_err_status_cnt[5];
2079}
2080
2081static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2082 void *context, int vl, int mode,
2083 u64 data)
2084{
2085 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2086
2087 return dd->cce_err_status_cnt[4];
2088}
2089
2090static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2091 const struct cntr_entry *entry,
2092 void *context, int vl, int mode, u64 data)
2093{
2094 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2095
2096 return dd->cce_err_status_cnt[3];
2097}
2098
2099static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2100 void *context, int vl,
2101 int mode, u64 data)
2102{
2103 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2104
2105 return dd->cce_err_status_cnt[2];
2106}
2107
2108static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2109 void *context, int vl,
2110 int mode, u64 data)
2111{
2112 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113
2114 return dd->cce_err_status_cnt[1];
2115}
2116
2117static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2118 void *context, int vl, int mode,
2119 u64 data)
2120{
2121 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2122
2123 return dd->cce_err_status_cnt[0];
2124}
2125
2126/*
2127 * Software counters corresponding to each of the
2128 * error status bits within RcvErrStatus
2129 */
2130static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2131 void *context, int vl, int mode,
2132 u64 data)
2133{
2134 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2135
2136 return dd->rcv_err_status_cnt[63];
2137}
2138
2139static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2140 void *context, int vl,
2141 int mode, u64 data)
2142{
2143 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2144
2145 return dd->rcv_err_status_cnt[62];
2146}
2147
2148static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2149 void *context, int vl, int mode,
2150 u64 data)
2151{
2152 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2153
2154 return dd->rcv_err_status_cnt[61];
2155}
2156
2157static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2158 void *context, int vl, int mode,
2159 u64 data)
2160{
2161 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2162
2163 return dd->rcv_err_status_cnt[60];
2164}
2165
2166static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2167 void *context, int vl,
2168 int mode, u64 data)
2169{
2170 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2171
2172 return dd->rcv_err_status_cnt[59];
2173}
2174
2175static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2176 void *context, int vl,
2177 int mode, u64 data)
2178{
2179 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2180
2181 return dd->rcv_err_status_cnt[58];
2182}
2183
2184static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2185 void *context, int vl, int mode,
2186 u64 data)
2187{
2188 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2189
2190 return dd->rcv_err_status_cnt[57];
2191}
2192
2193static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2194 void *context, int vl, int mode,
2195 u64 data)
2196{
2197 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2198
2199 return dd->rcv_err_status_cnt[56];
2200}
2201
2202static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2203 void *context, int vl, int mode,
2204 u64 data)
2205{
2206 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2207
2208 return dd->rcv_err_status_cnt[55];
2209}
2210
2211static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2212 const struct cntr_entry *entry,
2213 void *context, int vl, int mode, u64 data)
2214{
2215 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2216
2217 return dd->rcv_err_status_cnt[54];
2218}
2219
2220static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2221 const struct cntr_entry *entry,
2222 void *context, int vl, int mode, u64 data)
2223{
2224 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2225
2226 return dd->rcv_err_status_cnt[53];
2227}
2228
2229static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2230 void *context, int vl,
2231 int mode, u64 data)
2232{
2233 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2234
2235 return dd->rcv_err_status_cnt[52];
2236}
2237
2238static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2239 void *context, int vl,
2240 int mode, u64 data)
2241{
2242 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2243
2244 return dd->rcv_err_status_cnt[51];
2245}
2246
2247static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2248 void *context, int vl,
2249 int mode, u64 data)
2250{
2251 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2252
2253 return dd->rcv_err_status_cnt[50];
2254}
2255
2256static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2257 void *context, int vl,
2258 int mode, u64 data)
2259{
2260 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2261
2262 return dd->rcv_err_status_cnt[49];
2263}
2264
2265static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2266 void *context, int vl,
2267 int mode, u64 data)
2268{
2269 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2270
2271 return dd->rcv_err_status_cnt[48];
2272}
2273
2274static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2275 void *context, int vl,
2276 int mode, u64 data)
2277{
2278 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2279
2280 return dd->rcv_err_status_cnt[47];
2281}
2282
2283static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2284 void *context, int vl, int mode,
2285 u64 data)
2286{
2287 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2288
2289 return dd->rcv_err_status_cnt[46];
2290}
2291
2292static u64 access_rx_hq_intr_csr_parity_err_cnt(
2293 const struct cntr_entry *entry,
2294 void *context, int vl, int mode, u64 data)
2295{
2296 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2297
2298 return dd->rcv_err_status_cnt[45];
2299}
2300
2301static u64 access_rx_lookup_csr_parity_err_cnt(
2302 const struct cntr_entry *entry,
2303 void *context, int vl, int mode, u64 data)
2304{
2305 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2306
2307 return dd->rcv_err_status_cnt[44];
2308}
2309
2310static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2311 const struct cntr_entry *entry,
2312 void *context, int vl, int mode, u64 data)
2313{
2314 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2315
2316 return dd->rcv_err_status_cnt[43];
2317}
2318
2319static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2320 const struct cntr_entry *entry,
2321 void *context, int vl, int mode, u64 data)
2322{
2323 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2324
2325 return dd->rcv_err_status_cnt[42];
2326}
2327
2328static u64 access_rx_lookup_des_part2_parity_err_cnt(
2329 const struct cntr_entry *entry,
2330 void *context, int vl, int mode, u64 data)
2331{
2332 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2333
2334 return dd->rcv_err_status_cnt[41];
2335}
2336
2337static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2338 const struct cntr_entry *entry,
2339 void *context, int vl, int mode, u64 data)
2340{
2341 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2342
2343 return dd->rcv_err_status_cnt[40];
2344}
2345
2346static u64 access_rx_lookup_des_part1_unc_err_cnt(
2347 const struct cntr_entry *entry,
2348 void *context, int vl, int mode, u64 data)
2349{
2350 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2351
2352 return dd->rcv_err_status_cnt[39];
2353}
2354
2355static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2356 const struct cntr_entry *entry,
2357 void *context, int vl, int mode, u64 data)
2358{
2359 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2360
2361 return dd->rcv_err_status_cnt[38];
2362}
2363
2364static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2365 const struct cntr_entry *entry,
2366 void *context, int vl, int mode, u64 data)
2367{
2368 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2369
2370 return dd->rcv_err_status_cnt[37];
2371}
2372
2373static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2374 const struct cntr_entry *entry,
2375 void *context, int vl, int mode, u64 data)
2376{
2377 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2378
2379 return dd->rcv_err_status_cnt[36];
2380}
2381
2382static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2383 const struct cntr_entry *entry,
2384 void *context, int vl, int mode, u64 data)
2385{
2386 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2387
2388 return dd->rcv_err_status_cnt[35];
2389}
2390
2391static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2392 const struct cntr_entry *entry,
2393 void *context, int vl, int mode, u64 data)
2394{
2395 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2396
2397 return dd->rcv_err_status_cnt[34];
2398}
2399
2400static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2401 const struct cntr_entry *entry,
2402 void *context, int vl, int mode, u64 data)
2403{
2404 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2405
2406 return dd->rcv_err_status_cnt[33];
2407}
2408
2409static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2410 void *context, int vl, int mode,
2411 u64 data)
2412{
2413 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2414
2415 return dd->rcv_err_status_cnt[32];
2416}
2417
2418static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2419 void *context, int vl, int mode,
2420 u64 data)
2421{
2422 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2423
2424 return dd->rcv_err_status_cnt[31];
2425}
2426
2427static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2428 void *context, int vl, int mode,
2429 u64 data)
2430{
2431 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2432
2433 return dd->rcv_err_status_cnt[30];
2434}
2435
2436static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2437 void *context, int vl, int mode,
2438 u64 data)
2439{
2440 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2441
2442 return dd->rcv_err_status_cnt[29];
2443}
2444
2445static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2446 void *context, int vl,
2447 int mode, u64 data)
2448{
2449 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2450
2451 return dd->rcv_err_status_cnt[28];
2452}
2453
2454static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2455 const struct cntr_entry *entry,
2456 void *context, int vl, int mode, u64 data)
2457{
2458 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2459
2460 return dd->rcv_err_status_cnt[27];
2461}
2462
2463static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2464 const struct cntr_entry *entry,
2465 void *context, int vl, int mode, u64 data)
2466{
2467 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2468
2469 return dd->rcv_err_status_cnt[26];
2470}
2471
2472static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2473 const struct cntr_entry *entry,
2474 void *context, int vl, int mode, u64 data)
2475{
2476 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2477
2478 return dd->rcv_err_status_cnt[25];
2479}
2480
2481static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2482 const struct cntr_entry *entry,
2483 void *context, int vl, int mode, u64 data)
2484{
2485 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2486
2487 return dd->rcv_err_status_cnt[24];
2488}
2489
2490static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2491 const struct cntr_entry *entry,
2492 void *context, int vl, int mode, u64 data)
2493{
2494 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2495
2496 return dd->rcv_err_status_cnt[23];
2497}
2498
2499static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2500 const struct cntr_entry *entry,
2501 void *context, int vl, int mode, u64 data)
2502{
2503 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2504
2505 return dd->rcv_err_status_cnt[22];
2506}
2507
2508static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2509 const struct cntr_entry *entry,
2510 void *context, int vl, int mode, u64 data)
2511{
2512 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2513
2514 return dd->rcv_err_status_cnt[21];
2515}
2516
2517static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2518 const struct cntr_entry *entry,
2519 void *context, int vl, int mode, u64 data)
2520{
2521 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2522
2523 return dd->rcv_err_status_cnt[20];
2524}
2525
2526static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2527 const struct cntr_entry *entry,
2528 void *context, int vl, int mode, u64 data)
2529{
2530 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2531
2532 return dd->rcv_err_status_cnt[19];
2533}
2534
2535static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2536 void *context, int vl,
2537 int mode, u64 data)
2538{
2539 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2540
2541 return dd->rcv_err_status_cnt[18];
2542}
2543
2544static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2545 void *context, int vl,
2546 int mode, u64 data)
2547{
2548 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2549
2550 return dd->rcv_err_status_cnt[17];
2551}
2552
2553static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2554 const struct cntr_entry *entry,
2555 void *context, int vl, int mode, u64 data)
2556{
2557 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2558
2559 return dd->rcv_err_status_cnt[16];
2560}
2561
2562static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2563 const struct cntr_entry *entry,
2564 void *context, int vl, int mode, u64 data)
2565{
2566 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2567
2568 return dd->rcv_err_status_cnt[15];
2569}
2570
2571static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2572 void *context, int vl,
2573 int mode, u64 data)
2574{
2575 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2576
2577 return dd->rcv_err_status_cnt[14];
2578}
2579
2580static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2581 void *context, int vl,
2582 int mode, u64 data)
2583{
2584 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2585
2586 return dd->rcv_err_status_cnt[13];
2587}
2588
2589static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2590 void *context, int vl, int mode,
2591 u64 data)
2592{
2593 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2594
2595 return dd->rcv_err_status_cnt[12];
2596}
2597
2598static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2599 void *context, int vl, int mode,
2600 u64 data)
2601{
2602 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2603
2604 return dd->rcv_err_status_cnt[11];
2605}
2606
2607static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2608 void *context, int vl, int mode,
2609 u64 data)
2610{
2611 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2612
2613 return dd->rcv_err_status_cnt[10];
2614}
2615
2616static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2617 void *context, int vl, int mode,
2618 u64 data)
2619{
2620 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2621
2622 return dd->rcv_err_status_cnt[9];
2623}
2624
2625static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2626 void *context, int vl, int mode,
2627 u64 data)
2628{
2629 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2630
2631 return dd->rcv_err_status_cnt[8];
2632}
2633
2634static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2635 const struct cntr_entry *entry,
2636 void *context, int vl, int mode, u64 data)
2637{
2638 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2639
2640 return dd->rcv_err_status_cnt[7];
2641}
2642
2643static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2644 const struct cntr_entry *entry,
2645 void *context, int vl, int mode, u64 data)
2646{
2647 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2648
2649 return dd->rcv_err_status_cnt[6];
2650}
2651
2652static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2653 void *context, int vl, int mode,
2654 u64 data)
2655{
2656 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2657
2658 return dd->rcv_err_status_cnt[5];
2659}
2660
2661static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2662 void *context, int vl, int mode,
2663 u64 data)
2664{
2665 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2666
2667 return dd->rcv_err_status_cnt[4];
2668}
2669
2670static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2671 void *context, int vl, int mode,
2672 u64 data)
2673{
2674 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2675
2676 return dd->rcv_err_status_cnt[3];
2677}
2678
2679static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2680 void *context, int vl, int mode,
2681 u64 data)
2682{
2683 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2684
2685 return dd->rcv_err_status_cnt[2];
2686}
2687
2688static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2689 void *context, int vl, int mode,
2690 u64 data)
2691{
2692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693
2694 return dd->rcv_err_status_cnt[1];
2695}
2696
2697static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2698 void *context, int vl, int mode,
2699 u64 data)
2700{
2701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2702
2703 return dd->rcv_err_status_cnt[0];
2704}
2705
2706/*
2707 * Software counters corresponding to each of the
2708 * error status bits within SendPioErrStatus
2709 */
2710static u64 access_pio_pec_sop_head_parity_err_cnt(
2711 const struct cntr_entry *entry,
2712 void *context, int vl, int mode, u64 data)
2713{
2714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2715
2716 return dd->send_pio_err_status_cnt[35];
2717}
2718
2719static u64 access_pio_pcc_sop_head_parity_err_cnt(
2720 const struct cntr_entry *entry,
2721 void *context, int vl, int mode, u64 data)
2722{
2723 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2724
2725 return dd->send_pio_err_status_cnt[34];
2726}
2727
2728static u64 access_pio_last_returned_cnt_parity_err_cnt(
2729 const struct cntr_entry *entry,
2730 void *context, int vl, int mode, u64 data)
2731{
2732 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2733
2734 return dd->send_pio_err_status_cnt[33];
2735}
2736
2737static u64 access_pio_current_free_cnt_parity_err_cnt(
2738 const struct cntr_entry *entry,
2739 void *context, int vl, int mode, u64 data)
2740{
2741 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2742
2743 return dd->send_pio_err_status_cnt[32];
2744}
2745
2746static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2747 void *context, int vl, int mode,
2748 u64 data)
2749{
2750 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2751
2752 return dd->send_pio_err_status_cnt[31];
2753}
2754
2755static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2756 void *context, int vl, int mode,
2757 u64 data)
2758{
2759 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2760
2761 return dd->send_pio_err_status_cnt[30];
2762}
2763
2764static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2765 void *context, int vl, int mode,
2766 u64 data)
2767{
2768 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2769
2770 return dd->send_pio_err_status_cnt[29];
2771}
2772
2773static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2774 const struct cntr_entry *entry,
2775 void *context, int vl, int mode, u64 data)
2776{
2777 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2778
2779 return dd->send_pio_err_status_cnt[28];
2780}
2781
2782static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2783 void *context, int vl, int mode,
2784 u64 data)
2785{
2786 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2787
2788 return dd->send_pio_err_status_cnt[27];
2789}
2790
2791static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2792 void *context, int vl, int mode,
2793 u64 data)
2794{
2795 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2796
2797 return dd->send_pio_err_status_cnt[26];
2798}
2799
2800static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2801 void *context, int vl,
2802 int mode, u64 data)
2803{
2804 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2805
2806 return dd->send_pio_err_status_cnt[25];
2807}
2808
2809static u64 access_pio_block_qw_count_parity_err_cnt(
2810 const struct cntr_entry *entry,
2811 void *context, int vl, int mode, u64 data)
2812{
2813 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2814
2815 return dd->send_pio_err_status_cnt[24];
2816}
2817
2818static u64 access_pio_write_qw_valid_parity_err_cnt(
2819 const struct cntr_entry *entry,
2820 void *context, int vl, int mode, u64 data)
2821{
2822 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2823
2824 return dd->send_pio_err_status_cnt[23];
2825}
2826
2827static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2828 void *context, int vl, int mode,
2829 u64 data)
2830{
2831 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2832
2833 return dd->send_pio_err_status_cnt[22];
2834}
2835
2836static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2837 void *context, int vl,
2838 int mode, u64 data)
2839{
2840 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2841
2842 return dd->send_pio_err_status_cnt[21];
2843}
2844
2845static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2846 void *context, int vl,
2847 int mode, u64 data)
2848{
2849 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2850
2851 return dd->send_pio_err_status_cnt[20];
2852}
2853
2854static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2855 void *context, int vl,
2856 int mode, u64 data)
2857{
2858 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2859
2860 return dd->send_pio_err_status_cnt[19];
2861}
2862
2863static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2864 const struct cntr_entry *entry,
2865 void *context, int vl, int mode, u64 data)
2866{
2867 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2868
2869 return dd->send_pio_err_status_cnt[18];
2870}
2871
2872static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2873 void *context, int vl, int mode,
2874 u64 data)
2875{
2876 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2877
2878 return dd->send_pio_err_status_cnt[17];
2879}
2880
2881static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2882 void *context, int vl, int mode,
2883 u64 data)
2884{
2885 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2886
2887 return dd->send_pio_err_status_cnt[16];
2888}
2889
2890static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2891 const struct cntr_entry *entry,
2892 void *context, int vl, int mode, u64 data)
2893{
2894 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2895
2896 return dd->send_pio_err_status_cnt[15];
2897}
2898
2899static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2900 const struct cntr_entry *entry,
2901 void *context, int vl, int mode, u64 data)
2902{
2903 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2904
2905 return dd->send_pio_err_status_cnt[14];
2906}
2907
2908static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2909 const struct cntr_entry *entry,
2910 void *context, int vl, int mode, u64 data)
2911{
2912 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2913
2914 return dd->send_pio_err_status_cnt[13];
2915}
2916
2917static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2918 const struct cntr_entry *entry,
2919 void *context, int vl, int mode, u64 data)
2920{
2921 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2922
2923 return dd->send_pio_err_status_cnt[12];
2924}
2925
2926static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2927 const struct cntr_entry *entry,
2928 void *context, int vl, int mode, u64 data)
2929{
2930 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2931
2932 return dd->send_pio_err_status_cnt[11];
2933}
2934
2935static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2936 const struct cntr_entry *entry,
2937 void *context, int vl, int mode, u64 data)
2938{
2939 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2940
2941 return dd->send_pio_err_status_cnt[10];
2942}
2943
2944static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2945 const struct cntr_entry *entry,
2946 void *context, int vl, int mode, u64 data)
2947{
2948 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2949
2950 return dd->send_pio_err_status_cnt[9];
2951}
2952
2953static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2954 const struct cntr_entry *entry,
2955 void *context, int vl, int mode, u64 data)
2956{
2957 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2958
2959 return dd->send_pio_err_status_cnt[8];
2960}
2961
2962static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2963 const struct cntr_entry *entry,
2964 void *context, int vl, int mode, u64 data)
2965{
2966 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2967
2968 return dd->send_pio_err_status_cnt[7];
2969}
2970
2971static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2972 void *context, int vl, int mode,
2973 u64 data)
2974{
2975 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2976
2977 return dd->send_pio_err_status_cnt[6];
2978}
2979
2980static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2981 void *context, int vl, int mode,
2982 u64 data)
2983{
2984 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2985
2986 return dd->send_pio_err_status_cnt[5];
2987}
2988
2989static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2990 void *context, int vl, int mode,
2991 u64 data)
2992{
2993 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2994
2995 return dd->send_pio_err_status_cnt[4];
2996}
2997
2998static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2999 void *context, int vl, int mode,
3000 u64 data)
3001{
3002 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3003
3004 return dd->send_pio_err_status_cnt[3];
3005}
3006
3007static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3008 void *context, int vl, int mode,
3009 u64 data)
3010{
3011 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3012
3013 return dd->send_pio_err_status_cnt[2];
3014}
3015
3016static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3017 void *context, int vl,
3018 int mode, u64 data)
3019{
3020 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021
3022 return dd->send_pio_err_status_cnt[1];
3023}
3024
3025static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3026 void *context, int vl, int mode,
3027 u64 data)
3028{
3029 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3030
3031 return dd->send_pio_err_status_cnt[0];
3032}
3033
3034/*
3035 * Software counters corresponding to each of the
3036 * error status bits within SendDmaErrStatus
3037 */
3038static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3039 const struct cntr_entry *entry,
3040 void *context, int vl, int mode, u64 data)
3041{
3042 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3043
3044 return dd->send_dma_err_status_cnt[3];
3045}
3046
3047static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3048 const struct cntr_entry *entry,
3049 void *context, int vl, int mode, u64 data)
3050{
3051 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3052
3053 return dd->send_dma_err_status_cnt[2];
3054}
3055
3056static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3057 void *context, int vl, int mode,
3058 u64 data)
3059{
3060 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3061
3062 return dd->send_dma_err_status_cnt[1];
3063}
3064
3065static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3066 void *context, int vl, int mode,
3067 u64 data)
3068{
3069 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3070
3071 return dd->send_dma_err_status_cnt[0];
3072}
3073
3074/*
3075 * Software counters corresponding to each of the
3076 * error status bits within SendEgressErrStatus
3077 */
3078static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3079 const struct cntr_entry *entry,
3080 void *context, int vl, int mode, u64 data)
3081{
3082 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3083
3084 return dd->send_egress_err_status_cnt[63];
3085}
3086
3087static u64 access_tx_read_sdma_memory_csr_err_cnt(
3088 const struct cntr_entry *entry,
3089 void *context, int vl, int mode, u64 data)
3090{
3091 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3092
3093 return dd->send_egress_err_status_cnt[62];
3094}
3095
3096static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3097 void *context, int vl, int mode,
3098 u64 data)
3099{
3100 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3101
3102 return dd->send_egress_err_status_cnt[61];
3103}
3104
3105static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3106 void *context, int vl,
3107 int mode, u64 data)
3108{
3109 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3110
3111 return dd->send_egress_err_status_cnt[60];
3112}
3113
3114static u64 access_tx_read_sdma_memory_cor_err_cnt(
3115 const struct cntr_entry *entry,
3116 void *context, int vl, int mode, u64 data)
3117{
3118 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3119
3120 return dd->send_egress_err_status_cnt[59];
3121}
3122
3123static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3124 void *context, int vl, int mode,
3125 u64 data)
3126{
3127 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3128
3129 return dd->send_egress_err_status_cnt[58];
3130}
3131
3132static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3133 void *context, int vl, int mode,
3134 u64 data)
3135{
3136 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3137
3138 return dd->send_egress_err_status_cnt[57];
3139}
3140
3141static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3142 void *context, int vl, int mode,
3143 u64 data)
3144{
3145 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3146
3147 return dd->send_egress_err_status_cnt[56];
3148}
3149
3150static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3151 void *context, int vl, int mode,
3152 u64 data)
3153{
3154 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3155
3156 return dd->send_egress_err_status_cnt[55];
3157}
3158
3159static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3160 void *context, int vl, int mode,
3161 u64 data)
3162{
3163 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3164
3165 return dd->send_egress_err_status_cnt[54];
3166}
3167
3168static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3169 void *context, int vl, int mode,
3170 u64 data)
3171{
3172 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3173
3174 return dd->send_egress_err_status_cnt[53];
3175}
3176
3177static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3178 void *context, int vl, int mode,
3179 u64 data)
3180{
3181 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3182
3183 return dd->send_egress_err_status_cnt[52];
3184}
3185
3186static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3187 void *context, int vl, int mode,
3188 u64 data)
3189{
3190 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3191
3192 return dd->send_egress_err_status_cnt[51];
3193}
3194
3195static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3196 void *context, int vl, int mode,
3197 u64 data)
3198{
3199 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3200
3201 return dd->send_egress_err_status_cnt[50];
3202}
3203
3204static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3205 void *context, int vl, int mode,
3206 u64 data)
3207{
3208 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3209
3210 return dd->send_egress_err_status_cnt[49];
3211}
3212
3213static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3214 void *context, int vl, int mode,
3215 u64 data)
3216{
3217 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3218
3219 return dd->send_egress_err_status_cnt[48];
3220}
3221
3222static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3223 void *context, int vl, int mode,
3224 u64 data)
3225{
3226 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3227
3228 return dd->send_egress_err_status_cnt[47];
3229}
3230
3231static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3232 void *context, int vl, int mode,
3233 u64 data)
3234{
3235 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3236
3237 return dd->send_egress_err_status_cnt[46];
3238}
3239
3240static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3241 void *context, int vl, int mode,
3242 u64 data)
3243{
3244 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3245
3246 return dd->send_egress_err_status_cnt[45];
3247}
3248
3249static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3250 void *context, int vl,
3251 int mode, u64 data)
3252{
3253 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3254
3255 return dd->send_egress_err_status_cnt[44];
3256}
3257
3258static u64 access_tx_read_sdma_memory_unc_err_cnt(
3259 const struct cntr_entry *entry,
3260 void *context, int vl, int mode, u64 data)
3261{
3262 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3263
3264 return dd->send_egress_err_status_cnt[43];
3265}
3266
3267static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3268 void *context, int vl, int mode,
3269 u64 data)
3270{
3271 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3272
3273 return dd->send_egress_err_status_cnt[42];
3274}
3275
3276static u64 access_tx_credit_return_partiy_err_cnt(
3277 const struct cntr_entry *entry,
3278 void *context, int vl, int mode, u64 data)
3279{
3280 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3281
3282 return dd->send_egress_err_status_cnt[41];
3283}
3284
3285static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3286 const struct cntr_entry *entry,
3287 void *context, int vl, int mode, u64 data)
3288{
3289 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3290
3291 return dd->send_egress_err_status_cnt[40];
3292}
3293
3294static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3295 const struct cntr_entry *entry,
3296 void *context, int vl, int mode, u64 data)
3297{
3298 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3299
3300 return dd->send_egress_err_status_cnt[39];
3301}
3302
3303static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3304 const struct cntr_entry *entry,
3305 void *context, int vl, int mode, u64 data)
3306{
3307 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3308
3309 return dd->send_egress_err_status_cnt[38];
3310}
3311
3312static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3313 const struct cntr_entry *entry,
3314 void *context, int vl, int mode, u64 data)
3315{
3316 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3317
3318 return dd->send_egress_err_status_cnt[37];
3319}
3320
3321static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3322 const struct cntr_entry *entry,
3323 void *context, int vl, int mode, u64 data)
3324{
3325 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3326
3327 return dd->send_egress_err_status_cnt[36];
3328}
3329
3330static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3331 const struct cntr_entry *entry,
3332 void *context, int vl, int mode, u64 data)
3333{
3334 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3335
3336 return dd->send_egress_err_status_cnt[35];
3337}
3338
3339static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3340 const struct cntr_entry *entry,
3341 void *context, int vl, int mode, u64 data)
3342{
3343 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3344
3345 return dd->send_egress_err_status_cnt[34];
3346}
3347
3348static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3349 const struct cntr_entry *entry,
3350 void *context, int vl, int mode, u64 data)
3351{
3352 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3353
3354 return dd->send_egress_err_status_cnt[33];
3355}
3356
3357static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3358 const struct cntr_entry *entry,
3359 void *context, int vl, int mode, u64 data)
3360{
3361 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3362
3363 return dd->send_egress_err_status_cnt[32];
3364}
3365
3366static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3367 const struct cntr_entry *entry,
3368 void *context, int vl, int mode, u64 data)
3369{
3370 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3371
3372 return dd->send_egress_err_status_cnt[31];
3373}
3374
3375static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3376 const struct cntr_entry *entry,
3377 void *context, int vl, int mode, u64 data)
3378{
3379 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3380
3381 return dd->send_egress_err_status_cnt[30];
3382}
3383
3384static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3385 const struct cntr_entry *entry,
3386 void *context, int vl, int mode, u64 data)
3387{
3388 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3389
3390 return dd->send_egress_err_status_cnt[29];
3391}
3392
3393static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3394 const struct cntr_entry *entry,
3395 void *context, int vl, int mode, u64 data)
3396{
3397 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3398
3399 return dd->send_egress_err_status_cnt[28];
3400}
3401
3402static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3403 const struct cntr_entry *entry,
3404 void *context, int vl, int mode, u64 data)
3405{
3406 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3407
3408 return dd->send_egress_err_status_cnt[27];
3409}
3410
3411static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3412 const struct cntr_entry *entry,
3413 void *context, int vl, int mode, u64 data)
3414{
3415 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3416
3417 return dd->send_egress_err_status_cnt[26];
3418}
3419
3420static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3421 const struct cntr_entry *entry,
3422 void *context, int vl, int mode, u64 data)
3423{
3424 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3425
3426 return dd->send_egress_err_status_cnt[25];
3427}
3428
3429static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3430 const struct cntr_entry *entry,
3431 void *context, int vl, int mode, u64 data)
3432{
3433 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3434
3435 return dd->send_egress_err_status_cnt[24];
3436}
3437
3438static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3439 const struct cntr_entry *entry,
3440 void *context, int vl, int mode, u64 data)
3441{
3442 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3443
3444 return dd->send_egress_err_status_cnt[23];
3445}
3446
3447static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3448 const struct cntr_entry *entry,
3449 void *context, int vl, int mode, u64 data)
3450{
3451 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3452
3453 return dd->send_egress_err_status_cnt[22];
3454}
3455
3456static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3457 const struct cntr_entry *entry,
3458 void *context, int vl, int mode, u64 data)
3459{
3460 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3461
3462 return dd->send_egress_err_status_cnt[21];
3463}
3464
3465static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3466 const struct cntr_entry *entry,
3467 void *context, int vl, int mode, u64 data)
3468{
3469 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3470
3471 return dd->send_egress_err_status_cnt[20];
3472}
3473
3474static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3475 const struct cntr_entry *entry,
3476 void *context, int vl, int mode, u64 data)
3477{
3478 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3479
3480 return dd->send_egress_err_status_cnt[19];
3481}
3482
3483static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3484 const struct cntr_entry *entry,
3485 void *context, int vl, int mode, u64 data)
3486{
3487 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3488
3489 return dd->send_egress_err_status_cnt[18];
3490}
3491
3492static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3493 const struct cntr_entry *entry,
3494 void *context, int vl, int mode, u64 data)
3495{
3496 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3497
3498 return dd->send_egress_err_status_cnt[17];
3499}
3500
3501static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3502 const struct cntr_entry *entry,
3503 void *context, int vl, int mode, u64 data)
3504{
3505 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3506
3507 return dd->send_egress_err_status_cnt[16];
3508}
3509
3510static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3511 void *context, int vl, int mode,
3512 u64 data)
3513{
3514 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3515
3516 return dd->send_egress_err_status_cnt[15];
3517}
3518
3519static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3520 void *context, int vl,
3521 int mode, u64 data)
3522{
3523 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3524
3525 return dd->send_egress_err_status_cnt[14];
3526}
3527
3528static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3529 void *context, int vl, int mode,
3530 u64 data)
3531{
3532 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3533
3534 return dd->send_egress_err_status_cnt[13];
3535}
3536
3537static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3538 void *context, int vl, int mode,
3539 u64 data)
3540{
3541 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3542
3543 return dd->send_egress_err_status_cnt[12];
3544}
3545
3546static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3547 const struct cntr_entry *entry,
3548 void *context, int vl, int mode, u64 data)
3549{
3550 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3551
3552 return dd->send_egress_err_status_cnt[11];
3553}
3554
3555static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3556 void *context, int vl, int mode,
3557 u64 data)
3558{
3559 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3560
3561 return dd->send_egress_err_status_cnt[10];
3562}
3563
3564static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3565 void *context, int vl, int mode,
3566 u64 data)
3567{
3568 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3569
3570 return dd->send_egress_err_status_cnt[9];
3571}
3572
3573static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3574 const struct cntr_entry *entry,
3575 void *context, int vl, int mode, u64 data)
3576{
3577 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3578
3579 return dd->send_egress_err_status_cnt[8];
3580}
3581
3582static u64 access_tx_pio_launch_intf_parity_err_cnt(
3583 const struct cntr_entry *entry,
3584 void *context, int vl, int mode, u64 data)
3585{
3586 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3587
3588 return dd->send_egress_err_status_cnt[7];
3589}
3590
3591static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3592 void *context, int vl, int mode,
3593 u64 data)
3594{
3595 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3596
3597 return dd->send_egress_err_status_cnt[6];
3598}
3599
3600static u64 access_tx_incorrect_link_state_err_cnt(
3601 const struct cntr_entry *entry,
3602 void *context, int vl, int mode, u64 data)
3603{
3604 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3605
3606 return dd->send_egress_err_status_cnt[5];
3607}
3608
3609static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3610 void *context, int vl, int mode,
3611 u64 data)
3612{
3613 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3614
3615 return dd->send_egress_err_status_cnt[4];
3616}
3617
3618static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3619 const struct cntr_entry *entry,
3620 void *context, int vl, int mode, u64 data)
3621{
3622 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3623
3624 return dd->send_egress_err_status_cnt[3];
3625}
3626
3627static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3628 void *context, int vl, int mode,
3629 u64 data)
3630{
3631 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3632
3633 return dd->send_egress_err_status_cnt[2];
3634}
3635
3636static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3637 const struct cntr_entry *entry,
3638 void *context, int vl, int mode, u64 data)
3639{
3640 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641
3642 return dd->send_egress_err_status_cnt[1];
3643}
3644
3645static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3646 const struct cntr_entry *entry,
3647 void *context, int vl, int mode, u64 data)
3648{
3649 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3650
3651 return dd->send_egress_err_status_cnt[0];
3652}
3653
3654/*
3655 * Software counters corresponding to each of the
3656 * error status bits within SendErrStatus
3657 */
3658static u64 access_send_csr_write_bad_addr_err_cnt(
3659 const struct cntr_entry *entry,
3660 void *context, int vl, int mode, u64 data)
3661{
3662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3663
3664 return dd->send_err_status_cnt[2];
3665}
3666
3667static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3668 void *context, int vl,
3669 int mode, u64 data)
3670{
3671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3672
3673 return dd->send_err_status_cnt[1];
3674}
3675
3676static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3677 void *context, int vl, int mode,
3678 u64 data)
3679{
3680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3681
3682 return dd->send_err_status_cnt[0];
3683}
3684
3685/*
3686 * Software counters corresponding to each of the
3687 * error status bits within SendCtxtErrStatus
3688 */
3689static u64 access_pio_write_out_of_bounds_err_cnt(
3690 const struct cntr_entry *entry,
3691 void *context, int vl, int mode, u64 data)
3692{
3693 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3694
3695 return dd->sw_ctxt_err_status_cnt[4];
3696}
3697
3698static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3699 void *context, int vl, int mode,
3700 u64 data)
3701{
3702 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3703
3704 return dd->sw_ctxt_err_status_cnt[3];
3705}
3706
3707static u64 access_pio_write_crosses_boundary_err_cnt(
3708 const struct cntr_entry *entry,
3709 void *context, int vl, int mode, u64 data)
3710{
3711 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3712
3713 return dd->sw_ctxt_err_status_cnt[2];
3714}
3715
3716static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3717 void *context, int vl,
3718 int mode, u64 data)
3719{
3720 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3721
3722 return dd->sw_ctxt_err_status_cnt[1];
3723}
3724
3725static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3726 void *context, int vl, int mode,
3727 u64 data)
3728{
3729 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3730
3731 return dd->sw_ctxt_err_status_cnt[0];
3732}
3733
3734/*
3735 * Software counters corresponding to each of the
3736 * error status bits within SendDmaEngErrStatus
3737 */
3738static u64 access_sdma_header_request_fifo_cor_err_cnt(
3739 const struct cntr_entry *entry,
3740 void *context, int vl, int mode, u64 data)
3741{
3742 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3743
3744 return dd->sw_send_dma_eng_err_status_cnt[23];
3745}
3746
3747static u64 access_sdma_header_storage_cor_err_cnt(
3748 const struct cntr_entry *entry,
3749 void *context, int vl, int mode, u64 data)
3750{
3751 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3752
3753 return dd->sw_send_dma_eng_err_status_cnt[22];
3754}
3755
3756static u64 access_sdma_packet_tracking_cor_err_cnt(
3757 const struct cntr_entry *entry,
3758 void *context, int vl, int mode, u64 data)
3759{
3760 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3761
3762 return dd->sw_send_dma_eng_err_status_cnt[21];
3763}
3764
3765static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3766 void *context, int vl, int mode,
3767 u64 data)
3768{
3769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3770
3771 return dd->sw_send_dma_eng_err_status_cnt[20];
3772}
3773
3774static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3775 void *context, int vl, int mode,
3776 u64 data)
3777{
3778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3779
3780 return dd->sw_send_dma_eng_err_status_cnt[19];
3781}
3782
3783static u64 access_sdma_header_request_fifo_unc_err_cnt(
3784 const struct cntr_entry *entry,
3785 void *context, int vl, int mode, u64 data)
3786{
3787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3788
3789 return dd->sw_send_dma_eng_err_status_cnt[18];
3790}
3791
3792static u64 access_sdma_header_storage_unc_err_cnt(
3793 const struct cntr_entry *entry,
3794 void *context, int vl, int mode, u64 data)
3795{
3796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3797
3798 return dd->sw_send_dma_eng_err_status_cnt[17];
3799}
3800
3801static u64 access_sdma_packet_tracking_unc_err_cnt(
3802 const struct cntr_entry *entry,
3803 void *context, int vl, int mode, u64 data)
3804{
3805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3806
3807 return dd->sw_send_dma_eng_err_status_cnt[16];
3808}
3809
3810static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3811 void *context, int vl, int mode,
3812 u64 data)
3813{
3814 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3815
3816 return dd->sw_send_dma_eng_err_status_cnt[15];
3817}
3818
3819static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3820 void *context, int vl, int mode,
3821 u64 data)
3822{
3823 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3824
3825 return dd->sw_send_dma_eng_err_status_cnt[14];
3826}
3827
3828static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3829 void *context, int vl, int mode,
3830 u64 data)
3831{
3832 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3833
3834 return dd->sw_send_dma_eng_err_status_cnt[13];
3835}
3836
3837static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3838 void *context, int vl, int mode,
3839 u64 data)
3840{
3841 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3842
3843 return dd->sw_send_dma_eng_err_status_cnt[12];
3844}
3845
3846static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3847 void *context, int vl, int mode,
3848 u64 data)
3849{
3850 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3851
3852 return dd->sw_send_dma_eng_err_status_cnt[11];
3853}
3854
3855static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3856 void *context, int vl, int mode,
3857 u64 data)
3858{
3859 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3860
3861 return dd->sw_send_dma_eng_err_status_cnt[10];
3862}
3863
3864static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3865 void *context, int vl, int mode,
3866 u64 data)
3867{
3868 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3869
3870 return dd->sw_send_dma_eng_err_status_cnt[9];
3871}
3872
3873static u64 access_sdma_packet_desc_overflow_err_cnt(
3874 const struct cntr_entry *entry,
3875 void *context, int vl, int mode, u64 data)
3876{
3877 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3878
3879 return dd->sw_send_dma_eng_err_status_cnt[8];
3880}
3881
3882static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3883 void *context, int vl,
3884 int mode, u64 data)
3885{
3886 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3887
3888 return dd->sw_send_dma_eng_err_status_cnt[7];
3889}
3890
3891static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3892 void *context, int vl, int mode, u64 data)
3893{
3894 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3895
3896 return dd->sw_send_dma_eng_err_status_cnt[6];
3897}
3898
3899static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3900 void *context, int vl, int mode,
3901 u64 data)
3902{
3903 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3904
3905 return dd->sw_send_dma_eng_err_status_cnt[5];
3906}
3907
3908static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3909 void *context, int vl, int mode,
3910 u64 data)
3911{
3912 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3913
3914 return dd->sw_send_dma_eng_err_status_cnt[4];
3915}
3916
3917static u64 access_sdma_tail_out_of_bounds_err_cnt(
3918 const struct cntr_entry *entry,
3919 void *context, int vl, int mode, u64 data)
3920{
3921 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3922
3923 return dd->sw_send_dma_eng_err_status_cnt[3];
3924}
3925
3926static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3927 void *context, int vl, int mode,
3928 u64 data)
3929{
3930 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3931
3932 return dd->sw_send_dma_eng_err_status_cnt[2];
3933}
3934
3935static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3936 void *context, int vl, int mode,
3937 u64 data)
3938{
3939 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3940
3941 return dd->sw_send_dma_eng_err_status_cnt[1];
3942}
3943
3944static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3945 void *context, int vl, int mode,
3946 u64 data)
3947{
3948 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3949
3950 return dd->sw_send_dma_eng_err_status_cnt[0];
3951}
3952
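/*
 * DCC receive error count: read the CSR, then fold in the
 * software-counted bypass packet errors, saturating at CNTR_MAX on a
 * read; a write clears the software contribution.
 */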
Jakub Pawlak2b719042016-07-01 16:01:22 -07003953static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
3954 void *context, int vl, int mode,
3955 u64 data)
3956{
3957 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3958
3959 u64 val = 0;
3960 u64 csr = entry->csr;
3961
3962 val = read_write_csr(dd, csr, mode, data);
3963 if (mode == CNTR_MODE_R) {
3964 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
3965 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
3966 } else if (mode == CNTR_MODE_W) {
3967 dd->sw_rcv_bypass_packet_errors = 0;
3968 } else {
3969 dd_dev_err(dd, "Invalid cntr register access mode");
3970 return 0;
3971 }
3972 return val;
3973}
3974
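/*
 * def_access_sw_cpu(cntr) emits access_sw_cpu_<cntr>(), a per-port
 * accessor that forwards the z_<cntr> and per-CPU <cntr> fields of
 * ibport_data.rvp to read_write_cpu().
 */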
Mike Marciniszyn77241052015-07-30 15:17:43 -04003975#define def_access_sw_cpu(cntr) \
3976static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3977 void *context, int vl, int mode, u64 data) \
3978{ \
3979 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08003980 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3981 ppd->ibport_data.rvp.cntr, vl, \
Mike Marciniszyn77241052015-07-30 15:17:43 -04003982 mode, data); \
3983}
3984
3985def_access_sw_cpu(rc_acks);
3986def_access_sw_cpu(rc_qacks);
3987def_access_sw_cpu(rc_delayed_comp);
3988
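/*
 * def_access_ibp_counter(cntr) emits access_ibp_<cntr>(): per-VL
 * queries are not supported (they return 0); otherwise the n_<cntr>
 * field of ibport_data.rvp is handled by read_write_sw().
 */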
3989#define def_access_ibp_counter(cntr) \
3990static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3991 void *context, int vl, int mode, u64 data) \
3992{ \
3993 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3994 \
3995 if (vl != CNTR_INVALID_VL) \
3996 return 0; \
3997 \
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08003998 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
Mike Marciniszyn77241052015-07-30 15:17:43 -04003999 mode, data); \
4000}
4001
4002def_access_ibp_counter(loop_pkts);
4003def_access_ibp_counter(rc_resends);
4004def_access_ibp_counter(rnr_naks);
4005def_access_ibp_counter(other_naks);
4006def_access_ibp_counter(rc_timeouts);
4007def_access_ibp_counter(pkt_drops);
4008def_access_ibp_counter(dmawait);
4009def_access_ibp_counter(rc_seqnak);
4010def_access_ibp_counter(rc_dupreq);
4011def_access_ibp_counter(rdma_seq);
4012def_access_ibp_counter(unaligned);
4013def_access_ibp_counter(seq_naks);
4014
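/*
 * Device counter table, indexed by the C_* enum values.  Each entry
 * names the counter, points it at a CSR (0 for purely software
 * counters), sets its CNTR_* flags, and, where needed, supplies one of
 * the access helpers defined above.
 */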
4015static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4016[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4017[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4018 CNTR_NORMAL),
4019[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4020 CNTR_NORMAL),
4021[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4022 RCV_TID_FLOW_GEN_MISMATCH_CNT,
4023 CNTR_NORMAL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004024[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4025 CNTR_NORMAL),
4026[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4027 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4028[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4029 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4030[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4031 CNTR_NORMAL),
4032[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4033 CNTR_NORMAL),
4034[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4035 CNTR_NORMAL),
4036[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4037 CNTR_NORMAL),
4038[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4039 CNTR_NORMAL),
4040[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4041 CNTR_NORMAL),
4042[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4043 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4044[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4045 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4046[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4047 CNTR_SYNTH),
Jakub Pawlak2b719042016-07-01 16:01:22 -07004048[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4049 access_dc_rcv_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004050[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4051 CNTR_SYNTH),
4052[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4053 CNTR_SYNTH),
4054[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4055 CNTR_SYNTH),
4056[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4057 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4058[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4059 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4060 CNTR_SYNTH),
4061[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4062 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4063[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4064 CNTR_SYNTH),
4065[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4066 CNTR_SYNTH),
4067[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4068 CNTR_SYNTH),
4069[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4070 CNTR_SYNTH),
4071[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4072 CNTR_SYNTH),
4073[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4074 CNTR_SYNTH),
4075[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4076 CNTR_SYNTH),
4077[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4078 CNTR_SYNTH | CNTR_VL),
4079[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4080 CNTR_SYNTH | CNTR_VL),
4081[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4082[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4083 CNTR_SYNTH | CNTR_VL),
4084[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4085[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4086 CNTR_SYNTH | CNTR_VL),
4087[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4088 CNTR_SYNTH),
4089[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4090 CNTR_SYNTH | CNTR_VL),
4091[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4092 CNTR_SYNTH),
4093[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4094 CNTR_SYNTH | CNTR_VL),
4095[C_DC_TOTAL_CRC] =
4096 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4097 CNTR_SYNTH),
4098[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4099 CNTR_SYNTH),
4100[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4101 CNTR_SYNTH),
4102[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4103 CNTR_SYNTH),
4104[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4105 CNTR_SYNTH),
4106[C_DC_CRC_MULT_LN] =
4107 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4108 CNTR_SYNTH),
4109[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4110 CNTR_SYNTH),
4111[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4112 CNTR_SYNTH),
4113[C_DC_SEQ_CRC_CNT] =
4114 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4115 CNTR_SYNTH),
4116[C_DC_ESC0_ONLY_CNT] =
4117 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4118 CNTR_SYNTH),
4119[C_DC_ESC0_PLUS1_CNT] =
4120 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4121 CNTR_SYNTH),
4122[C_DC_ESC0_PLUS2_CNT] =
4123 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4124 CNTR_SYNTH),
4125[C_DC_REINIT_FROM_PEER_CNT] =
4126 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4127 CNTR_SYNTH),
4128[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4129 CNTR_SYNTH),
4130[C_DC_MISC_FLG_CNT] =
4131 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4132 CNTR_SYNTH),
4133[C_DC_PRF_GOOD_LTP_CNT] =
4134 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4135[C_DC_PRF_ACCEPTED_LTP_CNT] =
4136 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4137 CNTR_SYNTH),
4138[C_DC_PRF_RX_FLIT_CNT] =
4139 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4140[C_DC_PRF_TX_FLIT_CNT] =
4141 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4142[C_DC_PRF_CLK_CNTR] =
4143 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4144[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4145 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4146[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4147 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4148 CNTR_SYNTH),
4149[C_DC_PG_STS_TX_SBE_CNT] =
4150 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4151[C_DC_PG_STS_TX_MBE_CNT] =
4152 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4153 CNTR_SYNTH),
4154[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4155 access_sw_cpu_intr),
4156[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4157 access_sw_cpu_rcv_limit),
4158[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4159 access_sw_vtx_wait),
4160[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4161 access_sw_pio_wait),
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08004162[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4163 access_sw_pio_drain),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004164[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4165 access_sw_kmem_wait),
Dean Luickb4219222015-10-26 10:28:35 -04004166[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4167 access_sw_send_schedule),
Vennila Megavannana699c6c2016-01-11 18:30:56 -05004168[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4169 SEND_DMA_DESC_FETCHED_CNT, 0,
4170 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4171 dev_access_u32_csr),
4172[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4173 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4174 access_sde_int_cnt),
4175[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4176 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4177 access_sde_err_cnt),
4178[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4179 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4180 access_sde_idle_int_cnt),
4181[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4182 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4183 access_sde_progress_int_cnt),
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05004184/* MISC_ERR_STATUS */
4185[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4186 CNTR_NORMAL,
4187 access_misc_pll_lock_fail_err_cnt),
4188[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4189 CNTR_NORMAL,
4190 access_misc_mbist_fail_err_cnt),
4191[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4192 CNTR_NORMAL,
4193 access_misc_invalid_eep_cmd_err_cnt),
4194[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4195 CNTR_NORMAL,
4196 access_misc_efuse_done_parity_err_cnt),
4197[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4198 CNTR_NORMAL,
4199 access_misc_efuse_write_err_cnt),
4200[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4201 0, CNTR_NORMAL,
4202 access_misc_efuse_read_bad_addr_err_cnt),
4203[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4204 CNTR_NORMAL,
4205 access_misc_efuse_csr_parity_err_cnt),
4206[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4207 CNTR_NORMAL,
4208 access_misc_fw_auth_failed_err_cnt),
4209[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4210 CNTR_NORMAL,
4211 access_misc_key_mismatch_err_cnt),
4212[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4213 CNTR_NORMAL,
4214 access_misc_sbus_write_failed_err_cnt),
4215[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4216 CNTR_NORMAL,
4217 access_misc_csr_write_bad_addr_err_cnt),
4218[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4219 CNTR_NORMAL,
4220 access_misc_csr_read_bad_addr_err_cnt),
4221[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4222 CNTR_NORMAL,
4223 access_misc_csr_parity_err_cnt),
4224/* CceErrStatus */
4225[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4226 CNTR_NORMAL,
4227 access_sw_cce_err_status_aggregated_cnt),
4228[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4229 CNTR_NORMAL,
4230 access_cce_msix_csr_parity_err_cnt),
4231[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4232 CNTR_NORMAL,
4233 access_cce_int_map_unc_err_cnt),
4234[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4235 CNTR_NORMAL,
4236 access_cce_int_map_cor_err_cnt),
4237[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4238 CNTR_NORMAL,
4239 access_cce_msix_table_unc_err_cnt),
4240[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4241 CNTR_NORMAL,
4242 access_cce_msix_table_cor_err_cnt),
4243[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4244 0, CNTR_NORMAL,
4245 access_cce_rxdma_conv_fifo_parity_err_cnt),
4246[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4247 0, CNTR_NORMAL,
4248 access_cce_rcpl_async_fifo_parity_err_cnt),
4249[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4250 CNTR_NORMAL,
4251 access_cce_seg_write_bad_addr_err_cnt),
4252[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4253 CNTR_NORMAL,
4254 access_cce_seg_read_bad_addr_err_cnt),
4255[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4256 CNTR_NORMAL,
4257 access_la_triggered_cnt),
4258[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4259 CNTR_NORMAL,
4260 access_cce_trgt_cpl_timeout_err_cnt),
4261[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4262 CNTR_NORMAL,
4263 access_pcic_receive_parity_err_cnt),
4264[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4265 CNTR_NORMAL,
4266 access_pcic_transmit_back_parity_err_cnt),
4267[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4268 0, CNTR_NORMAL,
4269 access_pcic_transmit_front_parity_err_cnt),
4270[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4271 CNTR_NORMAL,
4272 access_pcic_cpl_dat_q_unc_err_cnt),
4273[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4274 CNTR_NORMAL,
4275 access_pcic_cpl_hd_q_unc_err_cnt),
4276[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4277 CNTR_NORMAL,
4278 access_pcic_post_dat_q_unc_err_cnt),
4279[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4280 CNTR_NORMAL,
4281 access_pcic_post_hd_q_unc_err_cnt),
4282[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4283 CNTR_NORMAL,
4284 access_pcic_retry_sot_mem_unc_err_cnt),
4285[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4286 CNTR_NORMAL,
4287 access_pcic_retry_mem_unc_err),
4288[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4289 CNTR_NORMAL,
4290 access_pcic_n_post_dat_q_parity_err_cnt),
4291[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4292 CNTR_NORMAL,
4293 access_pcic_n_post_h_q_parity_err_cnt),
4294[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4295 CNTR_NORMAL,
4296 access_pcic_cpl_dat_q_cor_err_cnt),
4297[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4298 CNTR_NORMAL,
4299 access_pcic_cpl_hd_q_cor_err_cnt),
4300[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4301 CNTR_NORMAL,
4302 access_pcic_post_dat_q_cor_err_cnt),
4303[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4304 CNTR_NORMAL,
4305 access_pcic_post_hd_q_cor_err_cnt),
4306[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4307 CNTR_NORMAL,
4308 access_pcic_retry_sot_mem_cor_err_cnt),
4309[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4310 CNTR_NORMAL,
4311 access_pcic_retry_mem_cor_err_cnt),
4312[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4313 "CceCli1AsyncFifoDbgParityError", 0, 0,
4314 CNTR_NORMAL,
4315 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4316[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4317 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4318 CNTR_NORMAL,
4319 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4320 ),
4321[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4322 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4323 CNTR_NORMAL,
4324 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4325[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4326 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4327 CNTR_NORMAL,
4328 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4329[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4330 0, CNTR_NORMAL,
4331 access_cce_cli2_async_fifo_parity_err_cnt),
4332[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4333 CNTR_NORMAL,
4334 access_cce_csr_cfg_bus_parity_err_cnt),
4335[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4336 0, CNTR_NORMAL,
4337 access_cce_cli0_async_fifo_parity_err_cnt),
4338[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4339 CNTR_NORMAL,
4340 access_cce_rspd_data_parity_err_cnt),
4341[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4342 CNTR_NORMAL,
4343 access_cce_trgt_access_err_cnt),
4344[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4345 0, CNTR_NORMAL,
4346 access_cce_trgt_async_fifo_parity_err_cnt),
4347[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4348 CNTR_NORMAL,
4349 access_cce_csr_write_bad_addr_err_cnt),
4350[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4351 CNTR_NORMAL,
4352 access_cce_csr_read_bad_addr_err_cnt),
4353[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4354 CNTR_NORMAL,
4355 access_ccs_csr_parity_err_cnt),
4356
4357/* RcvErrStatus */
4358[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4359 CNTR_NORMAL,
4360 access_rx_csr_parity_err_cnt),
4361[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4362 CNTR_NORMAL,
4363 access_rx_csr_write_bad_addr_err_cnt),
4364[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4365 CNTR_NORMAL,
4366 access_rx_csr_read_bad_addr_err_cnt),
4367[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4368 CNTR_NORMAL,
4369 access_rx_dma_csr_unc_err_cnt),
4370[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4371 CNTR_NORMAL,
4372 access_rx_dma_dq_fsm_encoding_err_cnt),
4373[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4374 CNTR_NORMAL,
4375 access_rx_dma_eq_fsm_encoding_err_cnt),
4376[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4377 CNTR_NORMAL,
4378 access_rx_dma_csr_parity_err_cnt),
4379[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4380 CNTR_NORMAL,
4381 access_rx_rbuf_data_cor_err_cnt),
4382[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4383 CNTR_NORMAL,
4384 access_rx_rbuf_data_unc_err_cnt),
4385[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4386 CNTR_NORMAL,
4387 access_rx_dma_data_fifo_rd_cor_err_cnt),
4388[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4389 CNTR_NORMAL,
4390 access_rx_dma_data_fifo_rd_unc_err_cnt),
4391[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4392 CNTR_NORMAL,
4393 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4394[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4395 CNTR_NORMAL,
4396 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4397[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4398 CNTR_NORMAL,
4399 access_rx_rbuf_desc_part2_cor_err_cnt),
4400[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4401 CNTR_NORMAL,
4402 access_rx_rbuf_desc_part2_unc_err_cnt),
4403[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4404 CNTR_NORMAL,
4405 access_rx_rbuf_desc_part1_cor_err_cnt),
4406[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4407 CNTR_NORMAL,
4408 access_rx_rbuf_desc_part1_unc_err_cnt),
4409[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4410 CNTR_NORMAL,
4411 access_rx_hq_intr_fsm_err_cnt),
4412[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4413 CNTR_NORMAL,
4414 access_rx_hq_intr_csr_parity_err_cnt),
4415[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4416 CNTR_NORMAL,
4417 access_rx_lookup_csr_parity_err_cnt),
4418[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4419 CNTR_NORMAL,
4420 access_rx_lookup_rcv_array_cor_err_cnt),
4421[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4422 CNTR_NORMAL,
4423 access_rx_lookup_rcv_array_unc_err_cnt),
4424[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4425 0, CNTR_NORMAL,
4426 access_rx_lookup_des_part2_parity_err_cnt),
4427[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4428 0, CNTR_NORMAL,
4429 access_rx_lookup_des_part1_unc_cor_err_cnt),
4430[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4431 CNTR_NORMAL,
4432 access_rx_lookup_des_part1_unc_err_cnt),
4433[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4434 CNTR_NORMAL,
4435 access_rx_rbuf_next_free_buf_cor_err_cnt),
4436[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4437 CNTR_NORMAL,
4438 access_rx_rbuf_next_free_buf_unc_err_cnt),
4439[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4440 "RxRbufFlInitWrAddrParityErr", 0, 0,
4441 CNTR_NORMAL,
4442 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4443[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4444 0, CNTR_NORMAL,
4445 access_rx_rbuf_fl_initdone_parity_err_cnt),
4446[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4447 0, CNTR_NORMAL,
4448 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4449[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4450 CNTR_NORMAL,
4451 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4452[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4453 CNTR_NORMAL,
4454 access_rx_rbuf_empty_err_cnt),
4455[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4456 CNTR_NORMAL,
4457 access_rx_rbuf_full_err_cnt),
4458[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4459 CNTR_NORMAL,
4460 access_rbuf_bad_lookup_err_cnt),
4461[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4462 CNTR_NORMAL,
4463 access_rbuf_ctx_id_parity_err_cnt),
4464[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4465 CNTR_NORMAL,
4466 access_rbuf_csr_qeopdw_parity_err_cnt),
4467[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4468 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4469 CNTR_NORMAL,
4470 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4471[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4472 "RxRbufCsrQTlPtrParityErr", 0, 0,
4473 CNTR_NORMAL,
4474 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4475[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4476 0, CNTR_NORMAL,
4477 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4478[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4479 0, CNTR_NORMAL,
4480 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4481[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4482 0, 0, CNTR_NORMAL,
4483 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4484[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4485 0, CNTR_NORMAL,
4486 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4487[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4488 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4489 CNTR_NORMAL,
4490 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4491[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4492 0, CNTR_NORMAL,
4493 access_rx_rbuf_block_list_read_cor_err_cnt),
4494[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4495 0, CNTR_NORMAL,
4496 access_rx_rbuf_block_list_read_unc_err_cnt),
4497[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4498 CNTR_NORMAL,
4499 access_rx_rbuf_lookup_des_cor_err_cnt),
4500[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4501 CNTR_NORMAL,
4502 access_rx_rbuf_lookup_des_unc_err_cnt),
4503[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4504 "RxRbufLookupDesRegUncCorErr", 0, 0,
4505 CNTR_NORMAL,
4506 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4507[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4508 CNTR_NORMAL,
4509 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4510[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4511 CNTR_NORMAL,
4512 access_rx_rbuf_free_list_cor_err_cnt),
4513[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4514 CNTR_NORMAL,
4515 access_rx_rbuf_free_list_unc_err_cnt),
4516[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4517 CNTR_NORMAL,
4518 access_rx_rcv_fsm_encoding_err_cnt),
4519[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4520 CNTR_NORMAL,
4521 access_rx_dma_flag_cor_err_cnt),
4522[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4523 CNTR_NORMAL,
4524 access_rx_dma_flag_unc_err_cnt),
4525[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4526 CNTR_NORMAL,
4527 access_rx_dc_sop_eop_parity_err_cnt),
4528[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4529 CNTR_NORMAL,
4530 access_rx_rcv_csr_parity_err_cnt),
4531[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4532 CNTR_NORMAL,
4533 access_rx_rcv_qp_map_table_cor_err_cnt),
4534[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4535 CNTR_NORMAL,
4536 access_rx_rcv_qp_map_table_unc_err_cnt),
4537[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4538 CNTR_NORMAL,
4539 access_rx_rcv_data_cor_err_cnt),
4540[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4541 CNTR_NORMAL,
4542 access_rx_rcv_data_unc_err_cnt),
4543[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4544 CNTR_NORMAL,
4545 access_rx_rcv_hdr_cor_err_cnt),
4546[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4547 CNTR_NORMAL,
4548 access_rx_rcv_hdr_unc_err_cnt),
4549[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4550 CNTR_NORMAL,
4551 access_rx_dc_intf_parity_err_cnt),
4552[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4553 CNTR_NORMAL,
4554 access_rx_dma_csr_cor_err_cnt),
4555/* SendPioErrStatus */
4556[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4557 CNTR_NORMAL,
4558 access_pio_pec_sop_head_parity_err_cnt),
4559[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4560 CNTR_NORMAL,
4561 access_pio_pcc_sop_head_parity_err_cnt),
4562[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4563 0, 0, CNTR_NORMAL,
4564 access_pio_last_returned_cnt_parity_err_cnt),
4565[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4566 0, CNTR_NORMAL,
4567 access_pio_current_free_cnt_parity_err_cnt),
4568[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4569 CNTR_NORMAL,
4570 access_pio_reserved_31_err_cnt),
4571[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4572 CNTR_NORMAL,
4573 access_pio_reserved_30_err_cnt),
4574[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4575 CNTR_NORMAL,
4576 access_pio_ppmc_sop_len_err_cnt),
4577[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4578 CNTR_NORMAL,
4579 access_pio_ppmc_bqc_mem_parity_err_cnt),
4580[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4581 CNTR_NORMAL,
4582 access_pio_vl_fifo_parity_err_cnt),
4583[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4584 CNTR_NORMAL,
4585 access_pio_vlf_sop_parity_err_cnt),
4586[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4587 CNTR_NORMAL,
4588 access_pio_vlf_v1_len_parity_err_cnt),
4589[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4590 CNTR_NORMAL,
4591 access_pio_block_qw_count_parity_err_cnt),
4592[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4593 CNTR_NORMAL,
4594 access_pio_write_qw_valid_parity_err_cnt),
4595[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4596 CNTR_NORMAL,
4597 access_pio_state_machine_err_cnt),
4598[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4599 CNTR_NORMAL,
4600 access_pio_write_data_parity_err_cnt),
4601[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4602 CNTR_NORMAL,
4603 access_pio_host_addr_mem_cor_err_cnt),
4604[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4605 CNTR_NORMAL,
4606 access_pio_host_addr_mem_unc_err_cnt),
4607[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4608 CNTR_NORMAL,
4609 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4610[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4611 CNTR_NORMAL,
4612 access_pio_init_sm_in_err_cnt),
4613[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4614 CNTR_NORMAL,
4615 access_pio_ppmc_pbl_fifo_err_cnt),
4616[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4617 0, CNTR_NORMAL,
4618 access_pio_credit_ret_fifo_parity_err_cnt),
4619[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4620 CNTR_NORMAL,
4621 access_pio_v1_len_mem_bank1_cor_err_cnt),
4622[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4623 CNTR_NORMAL,
4624 access_pio_v1_len_mem_bank0_cor_err_cnt),
4625[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4626 CNTR_NORMAL,
4627 access_pio_v1_len_mem_bank1_unc_err_cnt),
4628[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4629 CNTR_NORMAL,
4630 access_pio_v1_len_mem_bank0_unc_err_cnt),
4631[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4632 CNTR_NORMAL,
4633 access_pio_sm_pkt_reset_parity_err_cnt),
4634[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4635 CNTR_NORMAL,
4636 access_pio_pkt_evict_fifo_parity_err_cnt),
4637[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4638 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4639 CNTR_NORMAL,
4640 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4641[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4642 CNTR_NORMAL,
4643 access_pio_sbrdctl_crrel_parity_err_cnt),
4644[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4645 CNTR_NORMAL,
4646 access_pio_pec_fifo_parity_err_cnt),
4647[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4648 CNTR_NORMAL,
4649 access_pio_pcc_fifo_parity_err_cnt),
4650[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4651 CNTR_NORMAL,
4652 access_pio_sb_mem_fifo1_err_cnt),
4653[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4654 CNTR_NORMAL,
4655 access_pio_sb_mem_fifo0_err_cnt),
4656[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4657 CNTR_NORMAL,
4658 access_pio_csr_parity_err_cnt),
4659[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4660 CNTR_NORMAL,
4661 access_pio_write_addr_parity_err_cnt),
4662[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4663 CNTR_NORMAL,
4664 access_pio_write_bad_ctxt_err_cnt),
4665/* SendDmaErrStatus */
4666[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4667 0, CNTR_NORMAL,
4668 access_sdma_pcie_req_tracking_cor_err_cnt),
4669[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4670 0, CNTR_NORMAL,
4671 access_sdma_pcie_req_tracking_unc_err_cnt),
4672[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4673 CNTR_NORMAL,
4674 access_sdma_csr_parity_err_cnt),
4675[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4676 CNTR_NORMAL,
4677 access_sdma_rpy_tag_err_cnt),
4678/* SendEgressErrStatus */
4679[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4680 CNTR_NORMAL,
4681 access_tx_read_pio_memory_csr_unc_err_cnt),
4682[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4683 0, CNTR_NORMAL,
4684 access_tx_read_sdma_memory_csr_err_cnt),
4685[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4686 CNTR_NORMAL,
4687 access_tx_egress_fifo_cor_err_cnt),
4688[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4689 CNTR_NORMAL,
4690 access_tx_read_pio_memory_cor_err_cnt),
4691[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4692 CNTR_NORMAL,
4693 access_tx_read_sdma_memory_cor_err_cnt),
4694[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4695 CNTR_NORMAL,
4696 access_tx_sb_hdr_cor_err_cnt),
4697[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4698 CNTR_NORMAL,
4699 access_tx_credit_overrun_err_cnt),
4700[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4701 CNTR_NORMAL,
4702 access_tx_launch_fifo8_cor_err_cnt),
4703[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4704 CNTR_NORMAL,
4705 access_tx_launch_fifo7_cor_err_cnt),
4706[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4707 CNTR_NORMAL,
4708 access_tx_launch_fifo6_cor_err_cnt),
4709[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4710 CNTR_NORMAL,
4711 access_tx_launch_fifo5_cor_err_cnt),
4712[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4713 CNTR_NORMAL,
4714 access_tx_launch_fifo4_cor_err_cnt),
4715[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4716 CNTR_NORMAL,
4717 access_tx_launch_fifo3_cor_err_cnt),
4718[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4719 CNTR_NORMAL,
4720 access_tx_launch_fifo2_cor_err_cnt),
4721[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4722 CNTR_NORMAL,
4723 access_tx_launch_fifo1_cor_err_cnt),
4724[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4725 CNTR_NORMAL,
4726 access_tx_launch_fifo0_cor_err_cnt),
4727[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4728 CNTR_NORMAL,
4729 access_tx_credit_return_vl_err_cnt),
4730[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4731 CNTR_NORMAL,
4732 access_tx_hcrc_insertion_err_cnt),
4733[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4734 CNTR_NORMAL,
4735 access_tx_egress_fifo_unc_err_cnt),
4736[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4737 CNTR_NORMAL,
4738 access_tx_read_pio_memory_unc_err_cnt),
4739[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4740 CNTR_NORMAL,
4741 access_tx_read_sdma_memory_unc_err_cnt),
4742[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4743 CNTR_NORMAL,
4744 access_tx_sb_hdr_unc_err_cnt),
4745[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4746 CNTR_NORMAL,
4747 access_tx_credit_return_partiy_err_cnt),
4748[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4749 0, 0, CNTR_NORMAL,
4750 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4751[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4752 0, 0, CNTR_NORMAL,
4753 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4754[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4755 0, 0, CNTR_NORMAL,
4756 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4757[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4758 0, 0, CNTR_NORMAL,
4759 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4760[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4761 0, 0, CNTR_NORMAL,
4762 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4763[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4764 0, 0, CNTR_NORMAL,
4765 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4766[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4767 0, 0, CNTR_NORMAL,
4768 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4769[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4770 0, 0, CNTR_NORMAL,
4771 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4772[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4773 0, 0, CNTR_NORMAL,
4774 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4775[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4776 0, 0, CNTR_NORMAL,
4777 access_tx_sdma15_disallowed_packet_err_cnt),
4778[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4779 0, 0, CNTR_NORMAL,
4780 access_tx_sdma14_disallowed_packet_err_cnt),
4781[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4782 0, 0, CNTR_NORMAL,
4783 access_tx_sdma13_disallowed_packet_err_cnt),
4784[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4785 0, 0, CNTR_NORMAL,
4786 access_tx_sdma12_disallowed_packet_err_cnt),
4787[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4788 0, 0, CNTR_NORMAL,
4789 access_tx_sdma11_disallowed_packet_err_cnt),
4790[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4791 0, 0, CNTR_NORMAL,
4792 access_tx_sdma10_disallowed_packet_err_cnt),
4793[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4794 0, 0, CNTR_NORMAL,
4795 access_tx_sdma9_disallowed_packet_err_cnt),
4796[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4797 0, 0, CNTR_NORMAL,
4798 access_tx_sdma8_disallowed_packet_err_cnt),
4799[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4800 0, 0, CNTR_NORMAL,
4801 access_tx_sdma7_disallowed_packet_err_cnt),
4802[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4803 0, 0, CNTR_NORMAL,
4804 access_tx_sdma6_disallowed_packet_err_cnt),
4805[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4806 0, 0, CNTR_NORMAL,
4807 access_tx_sdma5_disallowed_packet_err_cnt),
4808[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4809 0, 0, CNTR_NORMAL,
4810 access_tx_sdma4_disallowed_packet_err_cnt),
4811[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4812 0, 0, CNTR_NORMAL,
4813 access_tx_sdma3_disallowed_packet_err_cnt),
4814[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4815 0, 0, CNTR_NORMAL,
4816 access_tx_sdma2_disallowed_packet_err_cnt),
4817[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4818 0, 0, CNTR_NORMAL,
4819 access_tx_sdma1_disallowed_packet_err_cnt),
4820[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4821 0, 0, CNTR_NORMAL,
4822 access_tx_sdma0_disallowed_packet_err_cnt),
4823[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4824 CNTR_NORMAL,
4825 access_tx_config_parity_err_cnt),
4826[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4827 CNTR_NORMAL,
4828 access_tx_sbrd_ctl_csr_parity_err_cnt),
4829[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4830 CNTR_NORMAL,
4831 access_tx_launch_csr_parity_err_cnt),
4832[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4833 CNTR_NORMAL,
4834 access_tx_illegal_vl_err_cnt),
4835[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4836 "TxSbrdCtlStateMachineParityErr", 0, 0,
4837 CNTR_NORMAL,
4838 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4839[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4840 CNTR_NORMAL,
4841 access_egress_reserved_10_err_cnt),
4842[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4843 CNTR_NORMAL,
4844 access_egress_reserved_9_err_cnt),
4845[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4846 0, 0, CNTR_NORMAL,
4847 access_tx_sdma_launch_intf_parity_err_cnt),
4848[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4849 CNTR_NORMAL,
4850 access_tx_pio_launch_intf_parity_err_cnt),
4851[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4852 CNTR_NORMAL,
4853 access_egress_reserved_6_err_cnt),
4854[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4855 CNTR_NORMAL,
4856 access_tx_incorrect_link_state_err_cnt),
4857[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4858 CNTR_NORMAL,
4859 access_tx_linkdown_err_cnt),
4860[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4861 "EgressFifoUnderrunOrParityErr", 0, 0,
4862 CNTR_NORMAL,
4863 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4864[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4865 CNTR_NORMAL,
4866 access_egress_reserved_2_err_cnt),
4867[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4868 CNTR_NORMAL,
4869 access_tx_pkt_integrity_mem_unc_err_cnt),
4870[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4871 CNTR_NORMAL,
4872 access_tx_pkt_integrity_mem_cor_err_cnt),
4873/* SendErrStatus */
4874[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4875 CNTR_NORMAL,
4876 access_send_csr_write_bad_addr_err_cnt),
4877[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4878 CNTR_NORMAL,
4879 access_send_csr_read_bad_addr_err_cnt),
4880[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4881 CNTR_NORMAL,
4882 access_send_csr_parity_cnt),
4883/* SendCtxtErrStatus */
4884[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4885 CNTR_NORMAL,
4886 access_pio_write_out_of_bounds_err_cnt),
4887[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4888 CNTR_NORMAL,
4889 access_pio_write_overflow_err_cnt),
4890[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4891 0, 0, CNTR_NORMAL,
4892 access_pio_write_crosses_boundary_err_cnt),
4893[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4894 CNTR_NORMAL,
4895 access_pio_disallowed_packet_err_cnt),
4896[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4897 CNTR_NORMAL,
4898 access_pio_inconsistent_sop_err_cnt),
4899/* SendDmaEngErrStatus */
4900[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4901 0, 0, CNTR_NORMAL,
4902 access_sdma_header_request_fifo_cor_err_cnt),
4903[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4904 CNTR_NORMAL,
4905 access_sdma_header_storage_cor_err_cnt),
4906[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4907 CNTR_NORMAL,
4908 access_sdma_packet_tracking_cor_err_cnt),
4909[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4910 CNTR_NORMAL,
4911 access_sdma_assembly_cor_err_cnt),
4912[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4913 CNTR_NORMAL,
4914 access_sdma_desc_table_cor_err_cnt),
4915[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4916 0, 0, CNTR_NORMAL,
4917 access_sdma_header_request_fifo_unc_err_cnt),
4918[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4919 CNTR_NORMAL,
4920 access_sdma_header_storage_unc_err_cnt),
4921[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4922 CNTR_NORMAL,
4923 access_sdma_packet_tracking_unc_err_cnt),
4924[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4925 CNTR_NORMAL,
4926 access_sdma_assembly_unc_err_cnt),
4927[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4928 CNTR_NORMAL,
4929 access_sdma_desc_table_unc_err_cnt),
4930[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4931 CNTR_NORMAL,
4932 access_sdma_timeout_err_cnt),
4933[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4934 CNTR_NORMAL,
4935 access_sdma_header_length_err_cnt),
4936[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4937 CNTR_NORMAL,
4938 access_sdma_header_address_err_cnt),
4939[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4940 CNTR_NORMAL,
4941 access_sdma_header_select_err_cnt),
4942[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4943 CNTR_NORMAL,
4944 access_sdma_reserved_9_err_cnt),
4945[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4946 CNTR_NORMAL,
4947 access_sdma_packet_desc_overflow_err_cnt),
4948[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4949 CNTR_NORMAL,
4950 access_sdma_length_mismatch_err_cnt),
4951[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4952 CNTR_NORMAL,
4953 access_sdma_halt_err_cnt),
4954[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4955 CNTR_NORMAL,
4956 access_sdma_mem_read_err_cnt),
4957[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4958 CNTR_NORMAL,
4959 access_sdma_first_desc_err_cnt),
4960[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4961 CNTR_NORMAL,
4962 access_sdma_tail_out_of_bounds_err_cnt),
4963[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4964 CNTR_NORMAL,
4965 access_sdma_too_long_err_cnt),
4966[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4967 CNTR_NORMAL,
4968 access_sdma_gen_mismatch_err_cnt),
4969[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4970 CNTR_NORMAL,
4971 access_sdma_wrong_dw_err_cnt),
};
4973
4974static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4975[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4976 CNTR_NORMAL),
4977[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4978 CNTR_NORMAL),
4979[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4980 CNTR_NORMAL),
4981[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4982 CNTR_NORMAL),
4983[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4984 CNTR_NORMAL),
4985[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4986 CNTR_NORMAL),
4987[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4988 CNTR_NORMAL),
4989[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4990[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4991[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
			access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
			CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
			access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
			access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
			access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5016[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5017[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5018[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5019[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5020[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5021[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5022[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5023[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5024[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5025[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5026[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5027[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5028 access_sw_cpu_rc_acks),
5029[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005030 access_sw_cpu_rc_qacks),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005031[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005032 access_sw_cpu_rc_delayed_comp),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005033[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5034[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5035[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5036[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5037[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5038[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5039[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5040[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5041[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5042[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5043[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5044[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5045[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5046[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5047[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5048[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5049[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5050[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5051[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5052[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5053[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5054[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5055[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5056[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5057[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5058[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5059[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5060[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5061[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5062[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5063[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5064[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5065[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5066[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5067[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5068[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5069[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5070[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5071[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5072[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5073[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5074[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5075[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5076[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5077[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5078[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5079[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5080[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5081[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5082[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5083[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5084[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5085[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5086[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5087[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5088[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5089[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5090[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5091[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5092[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5093[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5094[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5095[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5096[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5097[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5098[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5099[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5100[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5101[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5102[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5103[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5104[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5105[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5106[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5107[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5108[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5109[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5110[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5111[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5112[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5113};
5114
5115/* ======================================================================== */
5116
/* return true if this is chip revision A */
int is_ax(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0;
}

/* return true if this is chip revision B */
int is_bx(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xF0) == 0x10;
}
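
/*
 * Worked example (values follow directly from the masks above): the A/B
 * revision is encoded in the upper nibble of the minor chip revision, so a
 * minor revision of 0x01 makes is_ax() return true (revision A), while 0x10
 * makes is_bx() return true (revision B).
 */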
5134
/*
 * Append string s to buffer buf. Arguments curp and lenp are the current
 * position and remaining length, respectively.
 *
 * return 0 on success, 1 on out of room
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
	char *p = *curp;
	int len = *lenp;
	int result = 0;	/* success */
	char c;

	/* add a comma, if this is not the first string in the buffer */
	if (p != buf) {
		if (len == 0) {
			result = 1;	/* out of room */
			goto done;
		}
		*p++ = ',';
		len--;
	}

	/* copy the string */
	while ((c = *s++) != 0) {
		if (len == 0) {
			result = 1;	/* out of room */
			goto done;
		}
		*p++ = c;
		len--;
	}

done:
	/* write return values */
	*curp = p;
	*lenp = len;

	return result;
}
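
/*
 * Usage sketch for append_str(): starting from an empty buffer, appending
 * "CceErrInt" and then "RxeErrInt" yields "CceErrInt,RxeErrInt" - the comma
 * is only emitted once the buffer already holds a previous string. A return
 * of 1 means the output was cut short for lack of room; the caller is
 * responsible for adding the final nul.
 */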
5175
/*
 * Using the given flag table, print a comma separated string into
 * the buffer. End in '*' if the buffer is too short.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
			 struct flag_table *table, int table_size)
{
	char extra[32];
	char *p = buf;
	int len = buf_len;
	int no_room = 0;
	int i;

	/* make sure there are at least 2 bytes so we can form "*" */
	if (len < 2)
		return "";

	len--;	/* leave room for a nul */
	for (i = 0; i < table_size; i++) {
		if (flags & table[i].flag) {
			no_room = append_str(buf, &p, &len, table[i].str);
			if (no_room)
				break;
			flags &= ~table[i].flag;
		}
	}

	/* any undocumented bits left? */
	if (!no_room && flags) {
		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
		no_room = append_str(buf, &p, &len, extra);
	}

	/* add '*' if we ran out of room */
	if (no_room) {
		/* may need to back up to add space for a '*' */
		if (len == 0)
			--p;
		*p++ = '*';
	}

	/* add final nul - space already allocated above */
	*p = 0;
	return buf;
}
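
/*
 * Worked example for flag_string(), using a hypothetical table (the names
 * and values below are illustrative only, not part of the driver):
 *
 *	static struct flag_table demo_flags[] = {
 *		{ .flag = 0x1ull, .str = "BitA" },
 *		{ .flag = 0x2ull, .str = "BitB" },
 *	};
 *	char demo_buf[64];
 *
 *	flag_string(demo_buf, sizeof(demo_buf), 0x7, demo_flags,
 *		    ARRAY_SIZE(demo_flags));
 *
 * demo_buf then holds "BitA,BitB,bits 0x4" - undocumented bits are reported
 * in the trailing "bits 0x..." entry. If the buffer cannot hold the full
 * string, the output is truncated and terminated with '*'.
 */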
5221
5222/* first 8 CCE error interrupt source names */
5223static const char * const cce_misc_names[] = {
5224 "CceErrInt", /* 0 */
5225 "RxeErrInt", /* 1 */
5226 "MiscErrInt", /* 2 */
5227 "Reserved3", /* 3 */
5228 "PioErrInt", /* 4 */
5229 "SDmaErrInt", /* 5 */
5230 "EgressErrInt", /* 6 */
5231 "TxeErrInt" /* 7 */
5232};
5233
5234/*
5235 * Return the miscellaneous error interrupt name.
5236 */
5237static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5238{
5239 if (source < ARRAY_SIZE(cce_misc_names))
5240 strncpy(buf, cce_misc_names[source], bsize);
5241 else
Jubin John17fb4f22016-02-14 20:21:52 -08005242 snprintf(buf, bsize, "Reserved%u",
5243 source + IS_GENERAL_ERR_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005244
5245 return buf;
5246}
5247
5248/*
5249 * Return the SDMA engine error interrupt name.
5250 */
5251static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5252{
5253 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5254 return buf;
5255}
5256
5257/*
5258 * Return the send context error interrupt name.
5259 */
5260static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5261{
5262 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5263 return buf;
5264}
5265
5266static const char * const various_names[] = {
5267 "PbcInt",
5268 "GpioAssertInt",
5269 "Qsfp1Int",
5270 "Qsfp2Int",
5271 "TCritInt"
5272};
5273
5274/*
5275 * Return the various interrupt name.
5276 */
5277static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5278{
5279 if (source < ARRAY_SIZE(various_names))
5280 strncpy(buf, various_names[source], bsize);
5281 else
Jubin John8638b772016-02-14 20:19:24 -08005282 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005283 return buf;
5284}
5285
5286/*
5287 * Return the DC interrupt name.
5288 */
5289static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5290{
5291 static const char * const dc_int_names[] = {
5292 "common",
5293 "lcb",
5294 "8051",
5295 "lbm" /* local block merge */
5296 };
5297
5298 if (source < ARRAY_SIZE(dc_int_names))
5299 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5300 else
5301 snprintf(buf, bsize, "DCInt%u", source);
5302 return buf;
5303}
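
/*
 * Example: DC source 1 resolves to "dc_lcb_int" via the table above, while
 * an out-of-range source such as 5 falls back to the generic "DCInt5" name.
 */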
5304
5305static const char * const sdma_int_names[] = {
5306 "SDmaInt",
5307 "SdmaIdleInt",
5308 "SdmaProgressInt",
5309};
5310
5311/*
5312 * Return the SDMA engine interrupt name.
5313 */
5314static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5315{
5316 /* what interrupt */
5317 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5318 /* which engine */
5319 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5320
5321 if (likely(what < 3))
5322 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5323 else
5324 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5325 return buf;
5326}
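
/*
 * Decoding example: second-tier SDMA sources are laid out as
 * what * TXE_NUM_SDMA_ENGINES + which. Assuming 16 engines, source 0 names
 * "SDmaInt0", source 16 names "SdmaIdleInt0", and source 33 names
 * "SdmaProgressInt1".
 */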
5327
5328/*
5329 * Return the receive available interrupt name.
5330 */
5331static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5332{
5333 snprintf(buf, bsize, "RcvAvailInt%u", source);
5334 return buf;
5335}
5336
5337/*
5338 * Return the receive urgent interrupt name.
5339 */
5340static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5341{
5342 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5343 return buf;
5344}
5345
5346/*
5347 * Return the send credit interrupt name.
5348 */
5349static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5350{
5351 snprintf(buf, bsize, "SendCreditInt%u", source);
5352 return buf;
5353}
5354
5355/*
5356 * Return the reserved interrupt name.
5357 */
5358static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5359{
5360 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5361 return buf;
5362}
5363
5364static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5365{
5366 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005367 cce_err_status_flags,
5368 ARRAY_SIZE(cce_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005369}
5370
5371static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5372{
5373 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005374 rxe_err_status_flags,
5375 ARRAY_SIZE(rxe_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005376}
5377
5378static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5379{
5380 return flag_string(buf, buf_len, flags, misc_err_status_flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005381 ARRAY_SIZE(misc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005382}
5383
5384static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5385{
5386 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005387 pio_err_status_flags,
5388 ARRAY_SIZE(pio_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005389}
5390
5391static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5392{
5393 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005394 sdma_err_status_flags,
5395 ARRAY_SIZE(sdma_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005396}
5397
5398static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5399{
5400 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005401 egress_err_status_flags,
5402 ARRAY_SIZE(egress_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005403}
5404
5405static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5406{
5407 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005408 egress_err_info_flags,
5409 ARRAY_SIZE(egress_err_info_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005410}
5411
5412static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5413{
5414 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005415 send_err_status_flags,
5416 ARRAY_SIZE(send_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005417}
5418
5419static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5420{
5421 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005422 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005423
5424 /*
5425 * For most these errors, there is nothing that can be done except
5426 * report or record it.
5427 */
5428 dd_dev_info(dd, "CCE Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005429 cce_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005430
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005431 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5432 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005433 /* this error requires a manual drop into SPC freeze mode */
5434 /* then a fix up */
5435 start_freeze_handling(dd->pport, FREEZE_SELF);
5436 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005437
5438 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5439 if (reg & (1ull << i)) {
5440 incr_cntr64(&dd->cce_err_status_cnt[i]);
5441 /* maintain a counter over all cce_err_status errors */
5442 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5443 }
5444 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005445}
5446
5447/*
5448 * Check counters for receive errors that do not have an interrupt
5449 * associated with them.
5450 */
5451#define RCVERR_CHECK_TIME 10
5452static void update_rcverr_timer(unsigned long opaque)
5453{
5454 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5455 struct hfi1_pportdata *ppd = dd->pport;
5456 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5457
5458 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
Jubin John17fb4f22016-02-14 20:21:52 -08005459 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005460 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
Jubin John17fb4f22016-02-14 20:21:52 -08005461 set_link_down_reason(
5462 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5463 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005464 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5465 }
Jubin John50e5dcb2016-02-14 20:19:41 -08005466 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005467
5468 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5469}
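
/*
 * With RCVERR_CHECK_TIME of 10, the timer above re-arms itself every ten
 * seconds (jiffies + HZ * 10) until free_rcverr() deletes it.
 */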
5470
5471static int init_rcverr(struct hfi1_devdata *dd)
5472{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305473 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005474 /* Assume the hardware counter has been reset */
5475 dd->rcv_ovfl_cnt = 0;
5476 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5477}
5478
5479static void free_rcverr(struct hfi1_devdata *dd)
5480{
5481 if (dd->rcverr_timer.data)
5482 del_timer_sync(&dd->rcverr_timer);
5483 dd->rcverr_timer.data = 0;
5484}
5485
5486static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5487{
5488 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005489 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005490
5491 dd_dev_info(dd, "Receive Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005492 rxe_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005493
5494 if (reg & ALL_RXE_FREEZE_ERR) {
5495 int flags = 0;
5496
5497 /*
5498 * Freeze mode recovery is disabled for the errors
5499 * in RXE_FREEZE_ABORT_MASK
5500 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005501 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005502 flags = FREEZE_ABORT;
5503
5504 start_freeze_handling(dd->pport, flags);
5505 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005506
5507 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5508 if (reg & (1ull << i))
5509 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5510 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005511}
5512
5513static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5514{
5515 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005516 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005517
5518 dd_dev_info(dd, "Misc Error: %s",
Jubin John17fb4f22016-02-14 20:21:52 -08005519 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005520 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5521 if (reg & (1ull << i))
5522 incr_cntr64(&dd->misc_err_status_cnt[i]);
5523 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005524}
5525
5526static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5527{
5528 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005529 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005530
5531 dd_dev_info(dd, "PIO Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005532 pio_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005533
5534 if (reg & ALL_PIO_FREEZE_ERR)
5535 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005536
5537 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5538 if (reg & (1ull << i))
5539 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5540 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005541}
5542
5543static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5544{
5545 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005546 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005547
5548 dd_dev_info(dd, "SDMA Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005549 sdma_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005550
5551 if (reg & ALL_SDMA_FREEZE_ERR)
5552 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005553
5554 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5555 if (reg & (1ull << i))
5556 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5557 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005558}
5559
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005560static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5561{
5562 incr_cntr64(&ppd->port_xmit_discards);
5563}
5564
Mike Marciniszyn77241052015-07-30 15:17:43 -04005565static void count_port_inactive(struct hfi1_devdata *dd)
5566{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005567 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005568}
5569
5570/*
5571 * We have had a "disallowed packet" error during egress. Determine the
5572 * integrity check which failed, and update relevant error counter, etc.
5573 *
5574 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5575 * bit of state per integrity check, and so we can miss the reason for an
5576 * egress error if more than one packet fails the same integrity check
5577 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5578 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005579static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5580 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005581{
5582 struct hfi1_pportdata *ppd = dd->pport;
5583 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5584 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5585 char buf[96];
5586
5587 /* clear down all observed info as quickly as possible after read */
5588 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5589
5590 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005591 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5592 info, egress_err_info_string(buf, sizeof(buf), info), src);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005593
5594 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005595 if (info & PORT_DISCARD_EGRESS_ERRS) {
5596 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005597
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005598 /*
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005599 * Count all applicable bits as individual errors and
5600 * attribute them to the packet that triggered this handler.
5601 * This may not be completely accurate due to limitations
5602 * on the available hardware error information. There is
5603 * a single information register and any number of error
5604 * packets may have occurred and contributed to it before
5605 * this routine is called. This means that:
5606 * a) If multiple packets with the same error occur before
5607 * this routine is called, earlier packets are missed.
5608 * There is only a single bit for each error type.
5609 * b) Errors may not be attributed to the correct VL.
5610 * The driver is attributing all bits in the info register
5611 * to the packet that triggered this call, but bits
5612 * could be an accumulation of different packets with
5613 * different VLs.
5614 * c) A single error packet may have multiple counts attached
5615 * to it. There is no way for the driver to know if
5616 * multiple bits set in the info register are due to a
5617 * single packet or multiple packets. The driver assumes
5618 * multiple packets.
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005619 */
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005620 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005621 for (i = 0; i < weight; i++) {
5622 __count_port_discards(ppd);
5623 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5624 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5625 else if (vl == 15)
5626 incr_cntr64(&ppd->port_xmit_discards_vl
5627 [C_VL_15]);
5628 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005629 }
5630}
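
/*
 * Counting example for the loop above: if three applicable discard bits are
 * set in the info register and the triggering packet is attributed to VL 2,
 * then port_xmit_discards and port_xmit_discards_vl[2] are each incremented
 * three times, subject to the accuracy caveats a) through c) noted above.
 */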
5631
5632/*
5633 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5634 * register. Does it represent a 'port inactive' error?
5635 */
5636static inline int port_inactive_err(u64 posn)
5637{
5638 return (posn >= SEES(TX_LINKDOWN) &&
5639 posn <= SEES(TX_INCORRECT_LINK_STATE));
5640}
5641
5642/*
5643 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5644 * register. Does it represent a 'disallowed packet' error?
5645 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005646static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005647{
5648 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5649 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5650}
5651
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005652/*
5653 * Input value is a bit position of one of the SDMA engine disallowed
5654 * packet errors. Return which engine. Use of this must be guarded by
5655 * disallowed_pkt_err().
5656 */
5657static inline int disallowed_pkt_engine(int posn)
5658{
5659 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5660}
5661
/*
 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
 * be done.
 */
5666static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5667{
5668 struct sdma_vl_map *m;
5669 int vl;
5670
5671 /* range check */
5672 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5673 return -1;
5674
5675 rcu_read_lock();
5676 m = rcu_dereference(dd->sdma_map);
5677 vl = m->engine_to_vl[engine];
5678 rcu_read_unlock();
5679
5680 return vl;
5681}
5682
/*
 * Translate the send context (software index) into a VL. Return -1 if the
 * translation cannot be done.
 */
5687static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5688{
5689 struct send_context_info *sci;
5690 struct send_context *sc;
5691 int i;
5692
5693 sci = &dd->send_contexts[sw_index];
5694
5695 /* there is no information for user (PSM) and ack contexts */
Jianxin Xiong44306f12016-04-12 11:30:28 -07005696 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005697 return -1;
5698
5699 sc = sci->sc;
5700 if (!sc)
5701 return -1;
5702 if (dd->vld[15].sc == sc)
5703 return 15;
5704 for (i = 0; i < num_vls; i++)
5705 if (dd->vld[i].sc == sc)
5706 return i;
5707
5708 return -1;
5709}
5710
Mike Marciniszyn77241052015-07-30 15:17:43 -04005711static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5712{
5713 u64 reg_copy = reg, handled = 0;
5714 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005715 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005716
5717 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5718 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005719 else if (is_ax(dd) &&
5720 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5721 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005722 start_freeze_handling(dd->pport, 0);
5723
5724 while (reg_copy) {
5725 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005726 /* fls64() returns a 1-based offset, we want it zero based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005727 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005728 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005729
5730 if (port_inactive_err(shift)) {
5731 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005732 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005733 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005734 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5735
5736 handle_send_egress_err_info(dd, vl);
5737 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005738 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005739 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005740 }
5741
5742 reg &= ~handled;
5743
5744 if (reg)
5745 dd_dev_info(dd, "Egress Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005746 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005747
5748 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5749 if (reg & (1ull << i))
5750 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5751 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005752}
5753
5754static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5755{
5756 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005757 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005758
5759 dd_dev_info(dd, "Send Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005760 send_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005761
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005762 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5763 if (reg & (1ull << i))
5764 incr_cntr64(&dd->send_err_status_cnt[i]);
5765 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005766}
5767
5768/*
5769 * The maximum number of times the error clear down will loop before
5770 * blocking a repeating error. This value is arbitrary.
5771 */
5772#define MAX_CLEAR_COUNT 20
5773
5774/*
5775 * Clear and handle an error register. All error interrupts are funneled
5776 * through here to have a central location to correctly handle single-
5777 * or multi-shot errors.
5778 *
5779 * For non per-context registers, call this routine with a context value
5780 * of 0 so the per-context offset is zero.
5781 *
5782 * If the handler loops too many times, assume that something is wrong
5783 * and can't be fixed, so mask the error bits.
5784 */
5785static void interrupt_clear_down(struct hfi1_devdata *dd,
5786 u32 context,
5787 const struct err_reg_info *eri)
5788{
5789 u64 reg;
5790 u32 count;
5791
5792 /* read in a loop until no more errors are seen */
5793 count = 0;
5794 while (1) {
5795 reg = read_kctxt_csr(dd, context, eri->status);
5796 if (reg == 0)
5797 break;
5798 write_kctxt_csr(dd, context, eri->clear, reg);
5799 if (likely(eri->handler))
5800 eri->handler(dd, context, reg);
5801 count++;
5802 if (count > MAX_CLEAR_COUNT) {
5803 u64 mask;
5804
5805 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005806 eri->desc, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005807 /*
5808 * Read-modify-write so any other masked bits
5809 * remain masked.
5810 */
5811 mask = read_kctxt_csr(dd, context, eri->mask);
5812 mask &= ~reg;
5813 write_kctxt_csr(dd, context, eri->mask, mask);
5814 break;
5815 }
5816 }
5817}
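
/*
 * Example of the clear-down flow: a transient error sets a status bit; the
 * loop above reads it, writes it back to the clear CSR, invokes the handler
 * once, and exits on the next all-zero read. A bit still asserted after
 * MAX_CLEAR_COUNT (20) passes is treated as stuck and is masked off so it
 * can no longer generate interrupts.
 */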
5818
5819/*
5820 * CCE block "misc" interrupt. Source is < 16.
5821 */
5822static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5823{
5824 const struct err_reg_info *eri = &misc_errs[source];
5825
5826 if (eri->handler) {
5827 interrupt_clear_down(dd, 0, eri);
5828 } else {
5829 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005830 source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005831 }
5832}
5833
5834static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5835{
5836 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005837 sc_err_status_flags,
5838 ARRAY_SIZE(sc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005839}
5840
5841/*
5842 * Send context error interrupt. Source (hw_context) is < 160.
5843 *
5844 * All send context errors cause the send context to halt. The normal
5845 * clear-down mechanism cannot be used because we cannot clear the
5846 * error bits until several other long-running items are done first.
5847 * This is OK because with the context halted, nothing else is going
5848 * to happen on it anyway.
5849 */
5850static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5851 unsigned int hw_context)
5852{
5853 struct send_context_info *sci;
5854 struct send_context *sc;
5855 char flags[96];
5856 u64 status;
5857 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005858 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005859
5860 sw_index = dd->hw_to_sw[hw_context];
5861 if (sw_index >= dd->num_send_contexts) {
5862 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005863 "out of range sw index %u for send context %u\n",
5864 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005865 return;
5866 }
5867 sci = &dd->send_contexts[sw_index];
5868 sc = sci->sc;
5869 if (!sc) {
5870 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -08005871 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005872 return;
5873 }
5874
5875 /* tell the software that a halt has begun */
5876 sc_stop(sc, SCF_HALTED);
5877
5878 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5879
5880 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
Jubin John17fb4f22016-02-14 20:21:52 -08005881 send_context_err_status_string(flags, sizeof(flags),
5882 status));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005883
5884 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005885 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005886
5887 /*
5888 * Automatically restart halted kernel contexts out of interrupt
5889 * context. User contexts must ask the driver to restart the context.
5890 */
5891 if (sc->type != SC_USER)
5892 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005893
5894 /*
5895 * Update the counters for the corresponding status bits.
5896 * Note that these particular counters are aggregated over all
5897 * 160 contexts.
5898 */
5899 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5900 if (status & (1ull << i))
5901 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5902 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005903}
5904
5905static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5906 unsigned int source, u64 status)
5907{
5908 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005909 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005910
5911 sde = &dd->per_sdma[source];
5912#ifdef CONFIG_SDMA_VERBOSITY
5913 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5914 slashstrip(__FILE__), __LINE__, __func__);
5915 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5916 sde->this_idx, source, (unsigned long long)status);
5917#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005918 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005919 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005920
5921 /*
5922 * Update the counters for the corresponding status bits.
5923 * Note that these particular counters are aggregated over
5924 * all 16 DMA engines.
5925 */
5926 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5927 if (status & (1ull << i))
5928 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5929 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005930}
5931
5932/*
5933 * CCE block SDMA error interrupt. Source is < 16.
5934 */
5935static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5936{
5937#ifdef CONFIG_SDMA_VERBOSITY
5938 struct sdma_engine *sde = &dd->per_sdma[source];
5939
5940 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5941 slashstrip(__FILE__), __LINE__, __func__);
5942 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5943 source);
5944 sdma_dumpstate(sde);
5945#endif
5946 interrupt_clear_down(dd, source, &sdma_eng_err);
5947}
5948
5949/*
5950 * CCE block "various" interrupt. Source is < 8.
5951 */
5952static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5953{
5954 const struct err_reg_info *eri = &various_err[source];
5955
5956 /*
5957 * TCritInt cannot go through interrupt_clear_down()
5958 * because it is not a second tier interrupt. The handler
5959 * should be called directly.
5960 */
5961 if (source == TCRIT_INT_SOURCE)
5962 handle_temp_err(dd);
5963 else if (eri->handler)
5964 interrupt_clear_down(dd, 0, eri);
5965 else
5966 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005967 "%s: Unimplemented/reserved interrupt %d\n",
5968 __func__, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005969}
5970
5971static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5972{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005973 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005974 struct hfi1_pportdata *ppd = dd->pport;
5975 unsigned long flags;
5976 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5977
5978 if (reg & QSFP_HFI0_MODPRST_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005979 if (!qsfp_mod_present(ppd)) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08005980 dd_dev_info(dd, "%s: QSFP module removed\n",
5981 __func__);
5982
Mike Marciniszyn77241052015-07-30 15:17:43 -04005983 ppd->driver_link_ready = 0;
5984 /*
5985 * Cable removed, reset all our information about the
5986 * cache and cable capabilities
5987 */
5988
5989 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5990 /*
5991 * We don't set cache_refresh_required here as we expect
5992 * an interrupt when a cable is inserted
5993 */
5994 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005995 ppd->qsfp_info.reset_needed = 0;
5996 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005997 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08005998 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005999 /* Invert the ModPresent pin now to detect plug-in */
6000 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6001 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006002
6003 if ((ppd->offline_disabled_reason >
6004 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006005 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08006006 (ppd->offline_disabled_reason ==
6007 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6008 ppd->offline_disabled_reason =
6009 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006010 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006011
Mike Marciniszyn77241052015-07-30 15:17:43 -04006012 if (ppd->host_link_state == HLS_DN_POLL) {
6013 /*
6014 * The link is still in POLL. This means
6015 * that the normal link down processing
6016 * will not happen. We have to do it here
6017 * before turning the DC off.
6018 */
6019 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
6020 }
6021 } else {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006022 dd_dev_info(dd, "%s: QSFP module inserted\n",
6023 __func__);
6024
Mike Marciniszyn77241052015-07-30 15:17:43 -04006025 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6026 ppd->qsfp_info.cache_valid = 0;
6027 ppd->qsfp_info.cache_refresh_required = 1;
6028 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006029 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006030
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006031 /*
6032 * Stop inversion of ModPresent pin to detect
6033 * removal of the cable
6034 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006035 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006036 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6037 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6038
6039 ppd->offline_disabled_reason =
6040 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006041 }
6042 }
6043
6044 if (reg & QSFP_HFI0_INT_N) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006045 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006046 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006047 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6048 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006049 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6050 }
6051
6052 /* Schedule the QSFP work only if there is a cable attached. */
6053 if (qsfp_mod_present(ppd))
6054 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6055}
6056
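/*
 * Ask the 8051, via an HCMD_MISC command, to give the host access to the
 * LCB CSRs. Returns 0 on success, otherwise -EBUSY.
 */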
6057static int request_host_lcb_access(struct hfi1_devdata *dd)
6058{
6059 int ret;
6060
6061 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006062 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6063 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006064 if (ret != HCMD_SUCCESS) {
6065 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006066 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006067 }
6068 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6069}
6070
6071static int request_8051_lcb_access(struct hfi1_devdata *dd)
6072{
6073 int ret;
6074
6075 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006076 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6077 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006078 if (ret != HCMD_SUCCESS) {
6079 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006080 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006081 }
6082 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6083}
6084
6085/*
6086 * Set the LCB selector - allow host access. The DCC selector always
6087 * points to the host.
6088 */
6089static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6090{
6091 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006092 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6093 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006094}
6095
6096/*
6097 * Clear the LCB selector - allow 8051 access. The DCC selector always
6098 * points to the host.
6099 */
6100static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6101{
6102 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006103 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006104}
6105
6106/*
6107 * Acquire LCB access from the 8051. If the host already has access,
6108 * just increment a counter. Otherwise, inform the 8051 that the
6109 * host is taking access.
6110 *
6111 * Returns:
6112 * 0 on success
6113 * -EBUSY if the 8051 has control and cannot be disturbed
6114 * -errno if unable to acquire access from the 8051
6115 */
6116int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6117{
6118 struct hfi1_pportdata *ppd = dd->pport;
6119 int ret = 0;
6120
6121 /*
6122 * Use the host link state lock so the operation of this routine
6123 * { link state check, selector change, count increment } can occur
6124 * as a unit against a link state change. Otherwise there is a
6125 * race between the state change and the count increment.
6126 */
6127 if (sleep_ok) {
6128 mutex_lock(&ppd->hls_lock);
6129 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006130 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006131 udelay(1);
6132 }
6133
6134 /* this access is valid only when the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07006135 if (ppd->host_link_state & HLS_DOWN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006136 dd_dev_info(dd, "%s: link state %s not up\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006137 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006138 ret = -EBUSY;
6139 goto done;
6140 }
6141
6142 if (dd->lcb_access_count == 0) {
6143 ret = request_host_lcb_access(dd);
6144 if (ret) {
6145 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006146 "%s: unable to acquire LCB access, err %d\n",
6147 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006148 goto done;
6149 }
6150 set_host_lcb_access(dd);
6151 }
6152 dd->lcb_access_count++;
6153done:
6154 mutex_unlock(&ppd->hls_lock);
6155 return ret;
6156}
6157
6158/*
6159 * Release LCB access by decrementing the use count. If the count is moving
6160 * from 1 to 0, inform 8051 that it has control back.
6161 *
6162 * Returns:
6163 * 0 on success
6164 * -errno if unable to release access to the 8051
6165 */
6166int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6167{
6168 int ret = 0;
6169
6170 /*
6171 * Use the host link state lock because the acquire needed it.
6172 * Here, we only need to keep { selector change, count decrement }
6173 * as a unit.
6174 */
6175 if (sleep_ok) {
6176 mutex_lock(&dd->pport->hls_lock);
6177 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006178 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006179 udelay(1);
6180 }
6181
6182 if (dd->lcb_access_count == 0) {
6183 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006184 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006185 goto done;
6186 }
6187
6188 if (dd->lcb_access_count == 1) {
6189 set_8051_lcb_access(dd);
6190 ret = request_8051_lcb_access(dd);
6191 if (ret) {
6192 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006193 "%s: unable to release LCB access, err %d\n",
6194 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006195 /* restore host access if the grant didn't work */
6196 set_host_lcb_access(dd);
6197 goto done;
6198 }
6199 }
6200 dd->lcb_access_count--;
6201done:
6202 mutex_unlock(&dd->pport->hls_lock);
6203 return ret;
6204}
6205
6206/*
6207 * Initialize LCB access variables and state. Called during driver load,
6208 * after most of the initialization is finished.
6209 *
6210 * The DC default is LCB access on for the host. The driver defaults to
6211 * leaving access to the 8051. Assign access now - this constrains the call
6212 * to this routine to be after all LCB set-up is done. In particular, after
6213 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6214 */
6215static void init_lcb_access(struct hfi1_devdata *dd)
6216{
6217 dd->lcb_access_count = 0;
6218}
6219
6220/*
6221 * Write a response back to an 8051 request.
6222 */
6223static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6224{
6225 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
Jubin John17fb4f22016-02-14 20:21:52 -08006226 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6227 (u64)return_code <<
6228 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6229 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006230}
6231
6232/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006233 * Handle host requests from the 8051.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006234 */
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006235static void handle_8051_request(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006236{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006237 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006238 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006239 u16 data = 0;
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006240 u8 type;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006241
6242 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6243 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6244 return; /* no request */
6245
6246 /* zero out COMPLETED so the response is seen */
6247 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6248
6249 /* extract request details */
6250 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6251 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6252 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6253 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6254
6255 switch (type) {
6256 case HREQ_LOAD_CONFIG:
6257 case HREQ_SAVE_CONFIG:
6258 case HREQ_READ_CONFIG:
6259 case HREQ_SET_TX_EQ_ABS:
6260 case HREQ_SET_TX_EQ_REL:
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006261 case HREQ_ENABLE:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006262 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006263 type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006264 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6265 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006266 case HREQ_CONFIG_DONE:
6267 hreq_response(dd, HREQ_SUCCESS, 0);
6268 break;
6269
6270 case HREQ_INTERFACE_TEST:
6271 hreq_response(dd, HREQ_SUCCESS, data);
6272 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006273 default:
6274 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6275 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6276 break;
6277 }
6278}
6279
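/*
 * Program SEND_CM_GLOBAL_CREDIT with the allocation unit (vAU), the total
 * credit limit, and the shared credit limit.
 */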
6280static void write_global_credit(struct hfi1_devdata *dd,
6281 u8 vau, u16 total, u16 shared)
6282{
6283 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
Jubin John17fb4f22016-02-14 20:21:52 -08006284 ((u64)total <<
6285 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6286 ((u64)shared <<
6287 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6288 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006289}
6290
6291/*
6292 * Set up initial VL15 credits of the remote. Assumes the rest of
6293 * the CM credit registers are zero from a previous global or credit reset.
6294 */
6295void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6296{
6297 /* leave shared count at zero for both global and VL15 */
6298 write_global_credit(dd, vau, vl15buf, 0);
6299
6300 /* We may need some credits for another VL when sending packets
6301 * with the snoop interface. Dividing it down the middle for VL15
6302 * and VL0 should suffice.
6303 */
6304 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6305 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6306 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6307 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6308 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6309 } else {
6310 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6311 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6312 }
6313}
6314
6315/*
6316 * Zero all credit details from the previous connection and
6317 * reset the CM manager's internal counters.
6318 */
6319void reset_link_credits(struct hfi1_devdata *dd)
6320{
6321 int i;
6322
6323 /* remove all previous VL credit limits */
6324 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006325 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006326 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6327 write_global_credit(dd, 0, 0, 0);
6328 /* reset the CM block */
6329 pio_send_control(dd, PSC_CM_RESET);
6330}
6331
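/*
 * Credit/allocation unit encodings: a vCU encodes CU = 2^vCU and a vAU
 * encodes AU = 8 * 2^vAU bytes. For example, vCU = 3 is a CU of 8 and
 * vAU = 1 is an AU of 16 bytes.
 */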
6332/* convert a vCU to a CU */
6333static u32 vcu_to_cu(u8 vcu)
6334{
6335 return 1 << vcu;
6336}
6337
6338/* convert a CU to a vCU */
6339static u8 cu_to_vcu(u32 cu)
6340{
6341 return ilog2(cu);
6342}
6343
6344/* convert a vAU to an AU */
6345static u32 vau_to_au(u8 vau)
6346{
6347 return 8 * (1 << vau);
6348}
6349
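/* Reset the SM trap QP and SA QP to their link-up default values. */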
6350static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6351{
6352 ppd->sm_trap_qp = 0x0;
6353 ppd->sa_qp = 0x1;
6354}
6355
6356/*
6357 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6358 */
6359static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6360{
6361 u64 reg;
6362
6363 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6364 write_csr(dd, DC_LCB_CFG_RUN, 0);
6365 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6366 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
Jubin John17fb4f22016-02-14 20:21:52 -08006367 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006368 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6369 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6370 reg = read_csr(dd, DCC_CFG_RESET);
Jubin John17fb4f22016-02-14 20:21:52 -08006371 write_csr(dd, DCC_CFG_RESET, reg |
6372 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6373 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006374 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006375 if (!abort) {
6376 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6377 write_csr(dd, DCC_CFG_RESET, reg);
6378 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6379 }
6380}
6381
6382/*
6383 * This routine should be called after the link has been transitioned to
6384 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6385 * reset).
6386 *
6387 * The expectation is that the caller of this routine would have taken
6388 * care of properly transitioning the link into the correct state.
6389 */
6390static void dc_shutdown(struct hfi1_devdata *dd)
6391{
6392 unsigned long flags;
6393
6394 spin_lock_irqsave(&dd->dc8051_lock, flags);
6395 if (dd->dc_shutdown) {
6396 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6397 return;
6398 }
6399 dd->dc_shutdown = 1;
6400 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6401 /* Shutdown the LCB */
6402 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006403 /*
6404 * Going to OFFLINE would have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006405 * SerDes into reset already. Just need to shut down the 8051
Jubin John4d114fd2016-02-14 20:21:43 -08006406 * itself.
6407 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006408 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6409}
6410
Jubin John4d114fd2016-02-14 20:21:43 -08006411/*
6412 * Calling this after the DC has been brought out of reset should not
6413 * do any damage.
6414 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006415static void dc_start(struct hfi1_devdata *dd)
6416{
6417 unsigned long flags;
6418 int ret;
6419
6420 spin_lock_irqsave(&dd->dc8051_lock, flags);
6421 if (!dd->dc_shutdown)
6422 goto done;
6423 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6424 /* Take the 8051 out of reset */
6425 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6426 /* Wait until 8051 is ready */
6427 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6428 if (ret) {
6429 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006430 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006431 }
6432 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6433 write_csr(dd, DCC_CFG_RESET, 0x10);
6434 /* lcb_shutdown() with abort=1 does not restore these */
6435 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6436 spin_lock_irqsave(&dd->dc8051_lock, flags);
6437 dd->dc_shutdown = 0;
6438done:
6439 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6440}
6441
6442/*
6443 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6444 */
6445static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6446{
6447 u64 rx_radr, tx_radr;
6448 u32 version;
6449
6450 if (dd->icode != ICODE_FPGA_EMULATION)
6451 return;
6452
6453 /*
6454 * These LCB defaults on emulator _s are good, nothing to do here:
6455 * LCB_CFG_TX_FIFOS_RADR
6456 * LCB_CFG_RX_FIFOS_RADR
6457 * LCB_CFG_LN_DCLK
6458 * LCB_CFG_IGNORE_LOST_RCLK
6459 */
6460 if (is_emulator_s(dd))
6461 return;
6462 /* else this is _p */
6463
6464 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006465 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006466 version = 0x2d; /* all B0 use 0x2d or higher settings */
6467
6468 if (version <= 0x12) {
6469 /* release 0x12 and below */
6470
6471 /*
6472 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6473 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6474 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6475 */
6476 rx_radr =
6477 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6478 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6479 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6480 /*
6481 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6482 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6483 */
6484 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6485 } else if (version <= 0x18) {
6486 /* release 0x13 up to 0x18 */
6487 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6488 rx_radr =
6489 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6490 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6491 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6492 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6493 } else if (version == 0x19) {
6494 /* release 0x19 */
6495 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6496 rx_radr =
6497 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6498 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6499 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6500 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6501 } else if (version == 0x1a) {
6502 /* release 0x1a */
6503 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6504 rx_radr =
6505 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6506 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6507 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6508 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6509 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6510 } else {
6511 /* release 0x1b and higher */
6512 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6513 rx_radr =
6514 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6515 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6516 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6517 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6518 }
6519
6520 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6521 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6522 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
Jubin John17fb4f22016-02-14 20:21:52 -08006523 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006524 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6525}
6526
6527/*
6528 * Handle a SMA idle message
6529 *
6530 * This is a work-queue function outside of the interrupt.
6531 */
6532void handle_sma_message(struct work_struct *work)
6533{
6534 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6535 sma_message_work);
6536 struct hfi1_devdata *dd = ppd->dd;
6537 u64 msg;
6538 int ret;
6539
Jubin John4d114fd2016-02-14 20:21:43 -08006540 /*
6541 * msg is bytes 1-4 of the 40-bit idle message - the command code
6542 * is stripped off
6543 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006544 ret = read_idle_sma(dd, &msg);
6545 if (ret)
6546 return;
6547 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6548 /*
6549 * React to the SMA message. Byte[1] (0 for us) is the command.
6550 */
6551 switch (msg & 0xff) {
6552 case SMA_IDLE_ARM:
6553 /*
6554 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6555 * State Transitions
6556 *
6557 * Only expected in INIT or ARMED, discard otherwise.
6558 */
6559 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6560 ppd->neighbor_normal = 1;
6561 break;
6562 case SMA_IDLE_ACTIVE:
6563 /*
6564 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6565 * State Transitions
6566 *
6567 * Can activate the node. Discard otherwise.
6568 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006569 if (ppd->host_link_state == HLS_UP_ARMED &&
6570 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006571 ppd->neighbor_normal = 1;
6572 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6573 if (ret)
6574 dd_dev_err(
6575 dd,
6576 "%s: received Active SMA idle message, couldn't set link to Active\n",
6577 __func__);
6578 }
6579 break;
6580 default:
6581 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006582 "%s: received unexpected SMA idle message 0x%llx\n",
6583 __func__, msg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006584 break;
6585 }
6586}
6587
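/*
 * Read-modify-write RCV_CTRL under the rcvctrl lock: set the "add" bits
 * and clear the "clear" bits.
 */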
6588static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6589{
6590 u64 rcvctrl;
6591 unsigned long flags;
6592
6593 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6594 rcvctrl = read_csr(dd, RCV_CTRL);
6595 rcvctrl |= add;
6596 rcvctrl &= ~clear;
6597 write_csr(dd, RCV_CTRL, rcvctrl);
6598 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6599}
6600
6601static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6602{
6603 adjust_rcvctrl(dd, add, 0);
6604}
6605
6606static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6607{
6608 adjust_rcvctrl(dd, 0, clear);
6609}
6610
6611/*
6612 * Called from all interrupt handlers to start handling an SPC freeze.
6613 */
6614void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6615{
6616 struct hfi1_devdata *dd = ppd->dd;
6617 struct send_context *sc;
6618 int i;
6619
6620 if (flags & FREEZE_SELF)
6621 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6622
6623 /* enter frozen mode */
6624 dd->flags |= HFI1_FROZEN;
6625
6626 /* notify all SDMA engines that they are going into a freeze */
6627 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6628
6629 /* do halt pre-handling on all enabled send contexts */
6630 for (i = 0; i < dd->num_send_contexts; i++) {
6631 sc = dd->send_contexts[i].sc;
6632 if (sc && (sc->flags & SCF_ENABLED))
6633 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6634 }
6635
6636	/* Send contexts are frozen. Notify user space */
6637 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6638
6639 if (flags & FREEZE_ABORT) {
6640 dd_dev_err(dd,
6641 "Aborted freeze recovery. Please REBOOT system\n");
6642 return;
6643 }
6644 /* queue non-interrupt handler */
6645 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6646}
6647
6648/*
6649 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6650 * depending on the "freeze" parameter.
6651 *
6652 * No need to return an error if it times out; our only option
6653 * is to proceed anyway.
6654 */
6655static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6656{
6657 unsigned long timeout;
6658 u64 reg;
6659
6660 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6661 while (1) {
6662 reg = read_csr(dd, CCE_STATUS);
6663 if (freeze) {
6664 /* waiting until all indicators are set */
6665 if ((reg & ALL_FROZE) == ALL_FROZE)
6666 return; /* all done */
6667 } else {
6668 /* waiting until all indicators are clear */
6669 if ((reg & ALL_FROZE) == 0)
6670 return; /* all done */
6671 }
6672
6673 if (time_after(jiffies, timeout)) {
6674 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006675 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6676 freeze ? "" : "un", reg & ALL_FROZE,
6677 freeze ? ALL_FROZE : 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006678 return;
6679 }
6680 usleep_range(80, 120);
6681 }
6682}
6683
6684/*
6685 * Do all freeze handling for the RXE block.
6686 */
6687static void rxe_freeze(struct hfi1_devdata *dd)
6688{
6689 int i;
6690
6691 /* disable port */
6692 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6693
6694 /* disable all receive contexts */
6695 for (i = 0; i < dd->num_rcv_contexts; i++)
6696 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6697}
6698
6699/*
6700 * Unfreeze handling for the RXE block - kernel contexts only.
6701 * This will also enable the port. User contexts will do unfreeze
6702 * handling on a per-context basis as they call into the driver.
6703 *
6704 */
6705static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6706{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006707 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006708 int i;
6709
6710 /* enable all kernel contexts */
Mitko Haralanov566c1572016-02-03 14:32:49 -08006711 for (i = 0; i < dd->n_krcv_queues; i++) {
6712 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6713 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6714 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6715 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6716 hfi1_rcvctrl(dd, rcvmask, i);
6717 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006718
6719 /* enable port */
6720 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6721}
6722
6723/*
6724 * Non-interrupt SPC freeze handling.
6725 *
6726 * This is a work-queue function outside of the triggering interrupt.
6727 */
6728void handle_freeze(struct work_struct *work)
6729{
6730 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6731 freeze_work);
6732 struct hfi1_devdata *dd = ppd->dd;
6733
6734 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006735 wait_for_freeze_status(dd, 1);
6736
6737 /* SPC is now frozen */
6738
6739 /* do send PIO freeze steps */
6740 pio_freeze(dd);
6741
6742 /* do send DMA freeze steps */
6743 sdma_freeze(dd);
6744
6745 /* do send egress freeze steps - nothing to do */
6746
6747 /* do receive freeze steps */
6748 rxe_freeze(dd);
6749
6750 /*
6751 * Unfreeze the hardware - clear the freeze, wait for each
6752 * block's frozen bit to clear, then clear the frozen flag.
6753 */
6754 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6755 wait_for_freeze_status(dd, 0);
6756
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006757 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006758 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6759 wait_for_freeze_status(dd, 1);
6760 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6761 wait_for_freeze_status(dd, 0);
6762 }
6763
6764 /* do send PIO unfreeze steps for kernel contexts */
6765 pio_kernel_unfreeze(dd);
6766
6767 /* do send DMA unfreeze steps */
6768 sdma_unfreeze(dd);
6769
6770 /* do send egress unfreeze steps - nothing to do */
6771
6772 /* do receive unfreeze steps for kernel contexts */
6773 rxe_kernel_unfreeze(dd);
6774
6775 /*
6776 * The unfreeze procedure touches global device registers when
6777 * it disables and re-enables RXE. Mark the device unfrozen
6778 * after all that is done so other parts of the driver waiting
6779 * for the device to unfreeze don't do things out of order.
6780 *
6781 * The above implies that the meaning of HFI1_FROZEN flag is
6782 * "Device has gone into freeze mode and freeze mode handling
6783 * is still in progress."
6784 *
6785 * The flag will be removed when freeze mode processing has
6786 * completed.
6787 */
6788 dd->flags &= ~HFI1_FROZEN;
6789 wake_up(&dd->event_queue);
6790
6791 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006792}
6793
6794/*
6795 * Handle a link up interrupt from the 8051.
6796 *
6797 * This is a work-queue function outside of the interrupt.
6798 */
6799void handle_link_up(struct work_struct *work)
6800{
6801 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Jubin John17fb4f22016-02-14 20:21:52 -08006802 link_up_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006803 set_link_state(ppd, HLS_UP_INIT);
6804
6805 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6806 read_ltp_rtt(ppd->dd);
6807 /*
6808 * OPA specifies that certain counters are cleared on a transition
6809 * to link up, so do that.
6810 */
6811 clear_linkup_counters(ppd->dd);
6812 /*
6813 * And (re)set link up default values.
6814 */
6815 set_linkup_defaults(ppd);
6816
6817 /* enforce link speed enabled */
6818 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6819 /* oops - current speed is not enabled, bounce */
6820 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006821 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6822 ppd->link_speed_active, ppd->link_speed_enabled);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006823 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08006824 OPA_LINKDOWN_REASON_SPEED_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006825 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006826 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006827 start_link(ppd);
6828 }
6829}
6830
Jubin John4d114fd2016-02-14 20:21:43 -08006831/*
6832 * Several pieces of LNI information were cached for SMA in ppd.
6833 * Reset these on link down
6834 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006835static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6836{
6837 ppd->neighbor_guid = 0;
6838 ppd->neighbor_port_number = 0;
6839 ppd->neighbor_type = 0;
6840 ppd->neighbor_fm_security = 0;
6841}
6842
Dean Luickfeb831d2016-04-14 08:31:36 -07006843static const char * const link_down_reason_strs[] = {
6844 [OPA_LINKDOWN_REASON_NONE] = "None",
6845	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6846 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6847 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6848 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6849 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6850 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6851 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6852 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6853 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6854 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6855 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6856 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6857 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6858 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6859 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6860 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6861 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6862 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6863 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6864 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6865 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6866 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6867 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6868 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6869 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6870 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6871 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6872 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6873 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6874 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6875 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6876 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6877 "Excessive buffer overrun",
6878 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6879 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6880 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6881 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6882 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6883 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6884 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6885 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6886 "Local media not installed",
6887 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6888 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6889 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6890 "End to end not installed",
6891 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6892 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6893 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6894 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6895 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6896 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6897};
6898
6899/* return the neighbor link down reason string */
6900static const char *link_down_reason_str(u8 reason)
6901{
6902 const char *str = NULL;
6903
6904 if (reason < ARRAY_SIZE(link_down_reason_strs))
6905 str = link_down_reason_strs[reason];
6906 if (!str)
6907 str = "(invalid)";
6908
6909 return str;
6910}
6911
Mike Marciniszyn77241052015-07-30 15:17:43 -04006912/*
6913 * Handle a link down interrupt from the 8051.
6914 *
6915 * This is a work-queue function outside of the interrupt.
6916 */
6917void handle_link_down(struct work_struct *work)
6918{
6919 u8 lcl_reason, neigh_reason = 0;
Dean Luickfeb831d2016-04-14 08:31:36 -07006920 u8 link_down_reason;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006921 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Dean Luickfeb831d2016-04-14 08:31:36 -07006922 link_down_work);
6923 int was_up;
6924 static const char ldr_str[] = "Link down reason: ";
Mike Marciniszyn77241052015-07-30 15:17:43 -04006925
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006926 if ((ppd->host_link_state &
6927 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6928 ppd->port_type == PORT_TYPE_FIXED)
6929 ppd->offline_disabled_reason =
6930 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6931
6932 /* Go offline first, then deal with reading/writing through 8051 */
Dean Luickfeb831d2016-04-14 08:31:36 -07006933 was_up = !!(ppd->host_link_state & HLS_UP);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006934 set_link_state(ppd, HLS_DN_OFFLINE);
6935
Dean Luickfeb831d2016-04-14 08:31:36 -07006936 if (was_up) {
6937 lcl_reason = 0;
6938 /* link down reason is only valid if the link was up */
6939 read_link_down_reason(ppd->dd, &link_down_reason);
6940 switch (link_down_reason) {
6941 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6942 /* the link went down, no idle message reason */
6943 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6944 ldr_str);
6945 break;
6946 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6947 /*
6948 * The neighbor reason is only valid if an idle message
6949 * was received for it.
6950 */
6951 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6952 dd_dev_info(ppd->dd,
6953 "%sNeighbor link down message %d, %s\n",
6954 ldr_str, neigh_reason,
6955 link_down_reason_str(neigh_reason));
6956 break;
6957 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6958 dd_dev_info(ppd->dd,
6959 "%sHost requested link to go offline\n",
6960 ldr_str);
6961 break;
6962 default:
6963 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6964 ldr_str, link_down_reason);
6965 break;
6966 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006967
Dean Luickfeb831d2016-04-14 08:31:36 -07006968 /*
6969 * If no reason, assume peer-initiated but missed
6970 * LinkGoingDown idle flits.
6971 */
6972 if (neigh_reason == 0)
6973 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6974 } else {
6975 /* went down while polling or going up */
6976 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6977 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006978
6979 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6980
Dean Luick015e91f2016-04-14 08:31:42 -07006981 /* inform the SMA when the link transitions from up to down */
6982 if (was_up && ppd->local_link_down_reason.sma == 0 &&
6983 ppd->neigh_link_down_reason.sma == 0) {
6984 ppd->local_link_down_reason.sma =
6985 ppd->local_link_down_reason.latest;
6986 ppd->neigh_link_down_reason.sma =
6987 ppd->neigh_link_down_reason.latest;
6988 }
6989
Mike Marciniszyn77241052015-07-30 15:17:43 -04006990 reset_neighbor_info(ppd);
6991
6992 /* disable the port */
6993 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6994
Jubin John4d114fd2016-02-14 20:21:43 -08006995 /*
6996 * If there is no cable attached, turn the DC off. Otherwise,
6997 * start the link bring up.
6998 */
Easwar Hariharan623bba22016-04-12 11:25:57 -07006999 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007000 dc_shutdown(ppd->dd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007001 } else {
7002 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007003 start_link(ppd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007004 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007005}
7006
7007void handle_link_bounce(struct work_struct *work)
7008{
7009 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7010 link_bounce_work);
7011
7012 /*
7013 * Only do something if the link is currently up.
7014 */
7015 if (ppd->host_link_state & HLS_UP) {
7016 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007017 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007018 start_link(ppd);
7019 } else {
7020 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007021 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007022 }
7023}
7024
7025/*
7026 * Mask conversion: Capability exchange to Port LTP. The capability
7027 * exchange has an implicit 16b CRC that is mandatory.
7028 */
7029static int cap_to_port_ltp(int cap)
7030{
7031 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7032
7033 if (cap & CAP_CRC_14B)
7034 port_ltp |= PORT_LTP_CRC_MODE_14;
7035 if (cap & CAP_CRC_48B)
7036 port_ltp |= PORT_LTP_CRC_MODE_48;
7037 if (cap & CAP_CRC_12B_16B_PER_LANE)
7038 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7039
7040 return port_ltp;
7041}
7042
7043/*
7044 * Convert an OPA Port LTP mask to capability mask
7045 */
7046int port_ltp_to_cap(int port_ltp)
7047{
7048 int cap_mask = 0;
7049
7050 if (port_ltp & PORT_LTP_CRC_MODE_14)
7051 cap_mask |= CAP_CRC_14B;
7052 if (port_ltp & PORT_LTP_CRC_MODE_48)
7053 cap_mask |= CAP_CRC_48B;
7054 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7055 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7056
7057 return cap_mask;
7058}
7059
7060/*
7061 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7062 */
7063static int lcb_to_port_ltp(int lcb_crc)
7064{
7065 int port_ltp = 0;
7066
7067 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7068 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7069 else if (lcb_crc == LCB_CRC_48B)
7070 port_ltp = PORT_LTP_CRC_MODE_48;
7071 else if (lcb_crc == LCB_CRC_14B)
7072 port_ltp = PORT_LTP_CRC_MODE_14;
7073 else
7074 port_ltp = PORT_LTP_CRC_MODE_16;
7075
7076 return port_ltp;
7077}
7078
7079/*
7080 * Our neighbor has indicated that we are allowed to act as a fabric
7081 * manager, so place the full management partition key in the second
7082 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7083 * that we should already have the limited management partition key in
7084 * array element 1, and also that the port is not yet up when
7085 * add_full_mgmt_pkey() is invoked.
7086 */
7087static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7088{
7089 struct hfi1_devdata *dd = ppd->dd;
7090
Dean Luick87645222015-12-01 15:38:21 -05007091	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7092 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7093 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7094 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007095 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7096 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007097 hfi1_event_pkey_change(ppd->dd, ppd->port);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007098}
7099
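/*
 * Remove the full management partition key from (0-based) array element 2
 * and notify of the pkey change, if it had been set.
 */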
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007100static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007101{
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007102 if (ppd->pkeys[2] != 0) {
7103 ppd->pkeys[2] = 0;
7104 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007105 hfi1_event_pkey_change(ppd->dd, ppd->port);
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007106 }
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007107}
7108
Mike Marciniszyn77241052015-07-30 15:17:43 -04007109/*
7110 * Convert the given link width to the OPA link width bitmask.
7111 */
7112static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7113{
7114 switch (width) {
7115 case 0:
7116 /*
7117 * Simulator and quick linkup do not set the width.
7118 * Just set it to 4x without complaint.
7119 */
7120 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7121 return OPA_LINK_WIDTH_4X;
7122 return 0; /* no lanes up */
7123 case 1: return OPA_LINK_WIDTH_1X;
7124 case 2: return OPA_LINK_WIDTH_2X;
7125 case 3: return OPA_LINK_WIDTH_3X;
7126 default:
7127 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007128 __func__, width);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007129 /* fall through */
7130 case 4: return OPA_LINK_WIDTH_4X;
7131 }
7132}
7133
7134/*
7135 * Do a population count on the bottom nibble.
7136 */
7137static const u8 bit_counts[16] = {
7138 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7139};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007140
Mike Marciniszyn77241052015-07-30 15:17:43 -04007141static inline u8 nibble_to_count(u8 nibble)
7142{
7143 return bit_counts[nibble & 0xf];
7144}
7145
7146/*
7147 * Read the active lane information from the 8051 registers and return
7148 * their widths.
7149 *
7150 * Active lane information is found in these 8051 registers:
7151 * enable_lane_tx
7152 * enable_lane_rx
7153 */
7154static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7155 u16 *rx_width)
7156{
7157 u16 tx, rx;
7158 u8 enable_lane_rx;
7159 u8 enable_lane_tx;
7160 u8 tx_polarity_inversion;
7161 u8 rx_polarity_inversion;
7162 u8 max_rate;
7163
7164 /* read the active lanes */
7165 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08007166 &rx_polarity_inversion, &max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007167 read_local_lni(dd, &enable_lane_rx);
7168
7169 /* convert to counts */
7170 tx = nibble_to_count(enable_lane_tx);
7171 rx = nibble_to_count(enable_lane_rx);
7172
7173 /*
7174 * Set link_speed_active here, overriding what was set in
7175 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7176 * set the max_rate field in handle_verify_cap until v0.19.
7177 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007178 if ((dd->icode == ICODE_RTL_SILICON) &&
7179 (dd->dc8051_ver < dc8051_ver(0, 19))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007180 /* max_rate: 0 = 12.5G, 1 = 25G */
7181 switch (max_rate) {
7182 case 0:
7183 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7184 break;
7185 default:
7186 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007187 "%s: unexpected max rate %d, using 25Gb\n",
7188 __func__, (int)max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007189 /* fall through */
7190 case 1:
7191 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7192 break;
7193 }
7194 }
7195
7196 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007197 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7198 enable_lane_tx, tx, enable_lane_rx, rx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007199 *tx_width = link_width_to_bits(dd, tx);
7200 *rx_width = link_width_to_bits(dd, rx);
7201}
7202
7203/*
7204 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7205 * Valid after the end of VerifyCap and during LinkUp. Does not change
7206 * after link up. I.e. look elsewhere for downgrade information.
7207 *
7208 * Bits are:
7209 * + bits [7:4] contain the number of active transmitters
7210 * + bits [3:0] contain the number of active receivers
7211 * These are numbers 1 through 4 and can be different values if the
7212 * link is asymmetric.
7213 *
7214 * verify_cap_local_fm_link_width[0] retains its original value.
7215 */
7216static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7217 u16 *rx_width)
7218{
7219 u16 widths, tx, rx;
7220 u8 misc_bits, local_flags;
7221 u16 active_tx, active_rx;
7222
7223 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7224 tx = widths >> 12;
7225 rx = (widths >> 8) & 0xf;
7226
7227 *tx_width = link_width_to_bits(dd, tx);
7228 *rx_width = link_width_to_bits(dd, rx);
7229
7230 /* print the active widths */
7231 get_link_widths(dd, &active_tx, &active_rx);
7232}
7233
7234/*
7235 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7236 * hardware information when the link first comes up.
7237 *
7238 * The link width is not available until after VerifyCap.AllFramesReceived
7239 * (the trigger for handle_verify_cap), so this is outside that routine
7240 * and should be called when the 8051 signals linkup.
7241 */
7242void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7243{
7244 u16 tx_width, rx_width;
7245
7246 /* get end-of-LNI link widths */
7247 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7248
7249 /* use tx_width as the link is supposed to be symmetric on link up */
7250 ppd->link_width_active = tx_width;
7251 /* link width downgrade active (LWD.A) starts out matching LW.A */
7252 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7253 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7254 /* per OPA spec, on link up LWD.E resets to LWD.S */
7255 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7256	/* cache the active egress rate (units [10^6 bits/sec]) */
7257 ppd->current_egress_rate = active_egress_rate(ppd);
7258}
7259
7260/*
7261 * Handle a verify capabilities interrupt from the 8051.
7262 *
7263 * This is a work-queue function outside of the interrupt.
7264 */
7265void handle_verify_cap(struct work_struct *work)
7266{
7267 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7268 link_vc_work);
7269 struct hfi1_devdata *dd = ppd->dd;
7270 u64 reg;
7271 u8 power_management;
7272 u8 continious;
7273 u8 vcu;
7274 u8 vau;
7275 u8 z;
7276 u16 vl15buf;
7277 u16 link_widths;
7278 u16 crc_mask;
7279 u16 crc_val;
7280 u16 device_id;
7281 u16 active_tx, active_rx;
7282 u8 partner_supported_crc;
7283 u8 remote_tx_rate;
7284 u8 device_rev;
7285
7286 set_link_state(ppd, HLS_VERIFY_CAP);
7287
7288 lcb_shutdown(dd, 0);
7289 adjust_lcb_for_fpga_serdes(dd);
7290
7291 /*
7292 * These are now valid:
7293 * remote VerifyCap fields in the general LNI config
7294 * CSR DC8051_STS_REMOTE_GUID
7295 * CSR DC8051_STS_REMOTE_NODE_TYPE
7296 * CSR DC8051_STS_REMOTE_FM_SECURITY
7297 * CSR DC8051_STS_REMOTE_PORT_NO
7298 */
7299
7300 read_vc_remote_phy(dd, &power_management, &continious);
Jubin John17fb4f22016-02-14 20:21:52 -08007301 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7302 &partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007303 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7304 read_remote_device_id(dd, &device_id, &device_rev);
7305 /*
7306 * And the 'MgmtAllowed' information, which is exchanged during
7307	 * LNI, is also available at this point.
7308 */
7309 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7310 /* print the active widths */
7311 get_link_widths(dd, &active_tx, &active_rx);
7312 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007313 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7314 (int)power_management, (int)continious);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007315 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007316 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7317 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7318 (int)partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007319 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007320 (u32)remote_tx_rate, (u32)link_widths);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007321 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007322 (u32)device_id, (u32)device_rev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007323 /*
7324 * The peer vAU value just read is the peer receiver value. HFI does
7325 * not support a transmit vAU of 0 (AU == 8). We advertised that
7326 * with Z=1 in the fabric capabilities sent to the peer. The peer
7327 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7328 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7329 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7330 * subject to the Z value exception.
7331 */
7332 if (vau == 0)
7333 vau = 1;
7334 set_up_vl15(dd, vau, vl15buf);
7335
7336 /* set up the LCB CRC mode */
7337 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7338
7339 /* order is important: use the lowest bit in common */
7340 if (crc_mask & CAP_CRC_14B)
7341 crc_val = LCB_CRC_14B;
7342 else if (crc_mask & CAP_CRC_48B)
7343 crc_val = LCB_CRC_48B;
7344 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7345 crc_val = LCB_CRC_12B_16B_PER_LANE;
7346 else
7347 crc_val = LCB_CRC_16B;
7348
7349 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7350 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7351 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7352
7353 /* set (14b only) or clear sideband credit */
7354 reg = read_csr(dd, SEND_CM_CTRL);
7355 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7356 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007357 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007358 } else {
7359 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007360 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007361 }
7362
7363 ppd->link_speed_active = 0; /* invalid value */
7364 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7365 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7366 switch (remote_tx_rate) {
7367 case 0:
7368 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7369 break;
7370 case 1:
7371 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7372 break;
7373 }
7374 } else {
7375 /* actual rate is highest bit of the ANDed rates */
7376 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7377
7378 if (rate & 2)
7379 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7380 else if (rate & 1)
7381 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7382 }
7383 if (ppd->link_speed_active == 0) {
7384 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007385 __func__, (int)remote_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007386 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7387 }
7388
7389 /*
7390 * Cache the values of the supported, enabled, and active
7391 * LTP CRC modes to return in 'portinfo' queries. But the bit
7392 * flags that are returned in the portinfo query differ from
7393 * what's in the link_crc_mask, crc_sizes, and crc_val
7394 * variables. Convert these here.
7395 */
7396 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7397 /* supported crc modes */
7398 ppd->port_ltp_crc_mode |=
7399 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7400 /* enabled crc modes */
7401 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7402 /* active crc mode */
7403
7404 /* set up the remote credit return table */
7405 assign_remote_cm_au_table(dd, vcu);
7406
7407 /*
7408 * The LCB is reset on entry to handle_verify_cap(), so this must
7409 * be applied on every link up.
7410 *
7411 * Adjust LCB error kill enable to kill the link if
7412 * these RBUF errors are seen:
7413 * REPLAY_BUF_MBE_SMASK
7414 * FLIT_INPUT_BUF_MBE_SMASK
7415 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007416 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007417 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7418 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7419 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7420 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7421 }
7422
7423 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7424 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7425
7426 /* give 8051 access to the LCB CSRs */
7427 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7428 set_8051_lcb_access(dd);
7429
7430 ppd->neighbor_guid =
7431 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7432 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7433 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7434 ppd->neighbor_type =
7435 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7436 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7437 ppd->neighbor_fm_security =
7438 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7439 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7440 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007441 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7442 ppd->neighbor_guid, ppd->neighbor_type,
7443 ppd->mgmt_allowed, ppd->neighbor_fm_security);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007444 if (ppd->mgmt_allowed)
7445 add_full_mgmt_pkey(ppd);
7446
7447 /* tell the 8051 to go to LinkUp */
7448 set_link_state(ppd, HLS_GOING_UP);
7449}
7450
7451/*
7452 * Apply the link width downgrade enabled policy against the current active
7453 * link widths.
7454 *
7455 * Called when the enabled policy changes or the active link widths change.
7456 */
7457void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7458{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007459 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007460 int tries;
7461 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007462 u16 tx, rx;
7463
Dean Luick323fd782015-11-16 21:59:24 -05007464 /* use the hls lock to avoid a race with actual link up */
7465 tries = 0;
7466retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007467 mutex_lock(&ppd->hls_lock);
7468 /* only apply if the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07007469 if (ppd->host_link_state & HLS_DOWN) {
Dean Luick323fd782015-11-16 21:59:24 -05007470 /* still going up..wait and retry */
7471 if (ppd->host_link_state & HLS_GOING_UP) {
7472 if (++tries < 1000) {
7473 mutex_unlock(&ppd->hls_lock);
7474 usleep_range(100, 120); /* arbitrary */
7475 goto retry;
7476 }
7477 dd_dev_err(ppd->dd,
7478 "%s: giving up waiting for link state change\n",
7479 __func__);
7480 }
7481 goto done;
7482 }
7483
7484 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007485
7486 if (refresh_widths) {
7487 get_link_widths(ppd->dd, &tx, &rx);
7488 ppd->link_width_downgrade_tx_active = tx;
7489 ppd->link_width_downgrade_rx_active = rx;
7490 }
7491
Dean Luickf9b56352016-04-14 08:31:30 -07007492 if (ppd->link_width_downgrade_tx_active == 0 ||
7493 ppd->link_width_downgrade_rx_active == 0) {
7494 /* the 8051 reported a dead link as a downgrade */
7495 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7496 } else if (lwde == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007497 /* downgrade is disabled */
7498
7499 /* bounce if not at starting active width */
7500 if ((ppd->link_width_active !=
Jubin John17fb4f22016-02-14 20:21:52 -08007501 ppd->link_width_downgrade_tx_active) ||
7502 (ppd->link_width_active !=
7503 ppd->link_width_downgrade_rx_active)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007504 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007505 "Link downgrade is disabled and link has downgraded, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007506 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007507 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7508 ppd->link_width_active,
7509 ppd->link_width_downgrade_tx_active,
7510 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007511 do_bounce = 1;
7512 }
Jubin Johnd0d236e2016-02-14 20:20:15 -08007513 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7514 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007515 /* Tx or Rx is outside the enabled policy */
7516 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007517 "Link is outside of downgrade allowed, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007518 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007519 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7520 lwde, ppd->link_width_downgrade_tx_active,
7521 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007522 do_bounce = 1;
7523 }
7524
Dean Luick323fd782015-11-16 21:59:24 -05007525done:
7526 mutex_unlock(&ppd->hls_lock);
7527
Mike Marciniszyn77241052015-07-30 15:17:43 -04007528 if (do_bounce) {
7529 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08007530 OPA_LINKDOWN_REASON_WIDTH_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007531 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007532 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007533 start_link(ppd);
7534 }
7535}
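
/*
 * Worked example of the check above: if the downgrade policy enables
 * only 3X and 4X, lwde carries just those width bits.  Should the 8051
 * later report an active downgrade width of 2X, then
 * (lwde & link_width_downgrade_tx_active) == 0 and the link is bounced
 * with OPA_LINKDOWN_REASON_WIDTH_POLICY.
 */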
7536
7537/*
7538 * Handle a link downgrade interrupt from the 8051.
7539 *
7540 * This is a work-queue function outside of the interrupt.
7541 */
7542void handle_link_downgrade(struct work_struct *work)
7543{
7544 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7545 link_downgrade_work);
7546
7547 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7548 apply_link_downgrade_policy(ppd, 1);
7549}
7550
7551static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7552{
7553 return flag_string(buf, buf_len, flags, dcc_err_flags,
7554 ARRAY_SIZE(dcc_err_flags));
7555}
7556
7557static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7558{
7559 return flag_string(buf, buf_len, flags, lcb_err_flags,
7560 ARRAY_SIZE(lcb_err_flags));
7561}
7562
7563static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7564{
7565 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7566 ARRAY_SIZE(dc8051_err_flags));
7567}
7568
7569static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7570{
7571 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7572 ARRAY_SIZE(dc8051_info_err_flags));
7573}
7574
7575static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7576{
7577 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7578 ARRAY_SIZE(dc8051_info_host_msg_flags));
7579}
7580
7581static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7582{
7583 struct hfi1_pportdata *ppd = dd->pport;
7584 u64 info, err, host_msg;
7585 int queue_link_down = 0;
7586 char buf[96];
7587
7588 /* look at the flags */
7589 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7590 /* 8051 information set by firmware */
7591 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7592 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7593 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7594 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7595 host_msg = (info >>
7596 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7597 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7598
7599 /*
7600 * Handle error flags.
7601 */
7602 if (err & FAILED_LNI) {
7603 /*
7604 * LNI error indications are cleared by the 8051
7605 * only when starting polling. Only pay attention
7606 * to them when in the states that occur during
7607 * LNI.
7608 */
7609 if (ppd->host_link_state
7610 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7611 queue_link_down = 1;
7612 dd_dev_info(dd, "Link error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007613 dc8051_info_err_string(buf,
7614 sizeof(buf),
7615 err &
7616 FAILED_LNI));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007617 }
7618 err &= ~(u64)FAILED_LNI;
7619 }
Dean Luick6d014532015-12-01 15:38:23 -05007620 /* unknown frames can happen during LNI, just count */
7621 if (err & UNKNOWN_FRAME) {
7622 ppd->unknown_frame_count++;
7623 err &= ~(u64)UNKNOWN_FRAME;
7624 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007625 if (err) {
7626 /* report remaining errors, but do not do anything */
7627 dd_dev_err(dd, "8051 info error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007628 dc8051_info_err_string(buf, sizeof(buf),
7629 err));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007630 }
7631
7632 /*
7633 * Handle host message flags.
7634 */
7635 if (host_msg & HOST_REQ_DONE) {
7636 /*
7637 * Presently, the driver does a busy wait for
7638 * host requests to complete. This is only an
7639 * informational message.
7640 * NOTE: The 8051 clears the host message
7641 * information *on the next 8051 command*.
7642 * Therefore, when linkup is achieved,
7643 * this flag will still be set.
7644 */
7645 host_msg &= ~(u64)HOST_REQ_DONE;
7646 }
7647 if (host_msg & BC_SMA_MSG) {
7648 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7649 host_msg &= ~(u64)BC_SMA_MSG;
7650 }
7651 if (host_msg & LINKUP_ACHIEVED) {
7652 dd_dev_info(dd, "8051: Link up\n");
7653 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7654 host_msg &= ~(u64)LINKUP_ACHIEVED;
7655 }
7656 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07007657 handle_8051_request(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007658 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7659 }
7660 if (host_msg & VERIFY_CAP_FRAME) {
7661 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7662 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7663 }
7664 if (host_msg & LINK_GOING_DOWN) {
7665 const char *extra = "";
7666 /* no downgrade action needed if going down */
7667 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7668 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7669 extra = " (ignoring downgrade)";
7670 }
7671 dd_dev_info(dd, "8051: Link down%s\n", extra);
7672 queue_link_down = 1;
7673 host_msg &= ~(u64)LINK_GOING_DOWN;
7674 }
7675 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7676 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7677 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7678 }
7679 if (host_msg) {
7680 /* report remaining messages, but do not do anything */
7681 dd_dev_info(dd, "8051 info host message: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007682 dc8051_info_host_msg_string(buf,
7683 sizeof(buf),
7684 host_msg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007685 }
7686
7687 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7688 }
7689 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7690 /*
7691 * Lost the 8051 heartbeat. If this happens, we
7692 * receive constant interrupts about it. Disable
7693 * the interrupt after the first.
7694 */
7695 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7696 write_csr(dd, DC_DC8051_ERR_EN,
Jubin John17fb4f22016-02-14 20:21:52 -08007697 read_csr(dd, DC_DC8051_ERR_EN) &
7698 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007699
7700 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7701 }
7702 if (reg) {
7703 /* report the error, but do not do anything */
7704 dd_dev_err(dd, "8051 error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007705 dc8051_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007706 }
7707
7708 if (queue_link_down) {
Jubin John4d114fd2016-02-14 20:21:43 -08007709 /*
7710 * if the link is already going down or disabled, do not
7711 * queue another
7712 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007713 if ((ppd->host_link_state &
7714 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7715 ppd->link_enabled == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007716 dd_dev_info(dd, "%s: not queuing link down\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007717 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007718 } else {
7719 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7720 }
7721 }
7722}
7723
7724static const char * const fm_config_txt[] = {
7725[0] =
7726 "BadHeadDist: Distance violation between two head flits",
7727[1] =
7728 "BadTailDist: Distance violation between two tail flits",
7729[2] =
7730 "BadCtrlDist: Distance violation between two credit control flits",
7731[3] =
7732 "BadCrdAck: Credits return for unsupported VL",
7733[4] =
7734 "UnsupportedVLMarker: Received VL Marker",
7735[5] =
7736 "BadPreempt: Exceeded the preemption nesting level",
7737[6] =
7738 "BadControlFlit: Received unsupported control flit",
7739/* no 7 */
7740[8] =
7741 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7742};
7743
7744static const char * const port_rcv_txt[] = {
7745[1] =
7746 "BadPktLen: Illegal PktLen",
7747[2] =
7748 "PktLenTooLong: Packet longer than PktLen",
7749[3] =
7750 "PktLenTooShort: Packet shorter than PktLen",
7751[4] =
7752 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7753[5] =
7754 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7755[6] =
7756 "BadL2: Illegal L2 opcode",
7757[7] =
7758 "BadSC: Unsupported SC",
7759[9] =
7760 "BadRC: Illegal RC",
7761[11] =
7762 "PreemptError: Preempting with same VL",
7763[12] =
7764 "PreemptVL15: Preempting a VL15 packet",
7765};
7766
7767#define OPA_LDR_FMCONFIG_OFFSET 16
7768#define OPA_LDR_PORTRCV_OFFSET 0
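/*
 * These offsets place the FMConfig and PortRcv error groups within the
 * OPA PortErrorAction bitmask: if bit (offset + error code) is set in
 * ppd->port_error_action, the handler below bounces the link for that
 * error code.
 */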
7769static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7770{
7771 u64 info, hdr0, hdr1;
7772 const char *extra;
7773 char buf[96];
7774 struct hfi1_pportdata *ppd = dd->pport;
7775 u8 lcl_reason = 0;
7776 int do_bounce = 0;
7777
7778 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7779 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7780 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7781 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7782 /* set status bit */
7783 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7784 }
7785 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7786 }
7787
7788 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7789 struct hfi1_pportdata *ppd = dd->pport;
7790 /* this counter saturates at (2^32) - 1 */
7791 if (ppd->link_downed < (u32)UINT_MAX)
7792 ppd->link_downed++;
7793 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7794 }
7795
7796 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7797 u8 reason_valid = 1;
7798
7799 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7800 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7801 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7802 /* set status bit */
7803 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7804 }
7805 switch (info) {
7806 case 0:
7807 case 1:
7808 case 2:
7809 case 3:
7810 case 4:
7811 case 5:
7812 case 6:
7813 extra = fm_config_txt[info];
7814 break;
7815 case 8:
7816 extra = fm_config_txt[info];
7817 if (ppd->port_error_action &
7818 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7819 do_bounce = 1;
7820 /*
7821 * lcl_reason cannot be derived from info
7822 * for this error
7823 */
7824 lcl_reason =
7825 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7826 }
7827 break;
7828 default:
7829 reason_valid = 0;
7830 snprintf(buf, sizeof(buf), "reserved%lld", info);
7831 extra = buf;
7832 break;
7833 }
7834
7835 if (reason_valid && !do_bounce) {
7836 do_bounce = ppd->port_error_action &
7837 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7838 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7839 }
7840
7841 /* just report this */
7842 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7843 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7844 }
7845
7846 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7847 u8 reason_valid = 1;
7848
7849 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7850 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7851 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7852 if (!(dd->err_info_rcvport.status_and_code &
7853 OPA_EI_STATUS_SMASK)) {
7854 dd->err_info_rcvport.status_and_code =
7855 info & OPA_EI_CODE_SMASK;
7856 /* set status bit */
7857 dd->err_info_rcvport.status_and_code |=
7858 OPA_EI_STATUS_SMASK;
Jubin John4d114fd2016-02-14 20:21:43 -08007859 /*
7860 * save first 2 flits in the packet that caused
7861 * the error
7862 */
Bart Van Assche48a0cc132016-06-03 12:09:56 -07007863 dd->err_info_rcvport.packet_flit1 = hdr0;
7864 dd->err_info_rcvport.packet_flit2 = hdr1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007865 }
7866 switch (info) {
7867 case 1:
7868 case 2:
7869 case 3:
7870 case 4:
7871 case 5:
7872 case 6:
7873 case 7:
7874 case 9:
7875 case 11:
7876 case 12:
7877 extra = port_rcv_txt[info];
7878 break;
7879 default:
7880 reason_valid = 0;
7881 snprintf(buf, sizeof(buf), "reserved%lld", info);
7882 extra = buf;
7883 break;
7884 }
7885
7886 if (reason_valid && !do_bounce) {
7887 do_bounce = ppd->port_error_action &
7888 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7889 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7890 }
7891
7892 /* just report this */
7893 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7894 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007895 hdr0, hdr1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007896
7897 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7898 }
7899
7900 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7901 /* informative only */
7902 dd_dev_info(dd, "8051 access to LCB blocked\n");
7903 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7904 }
7905 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7906 /* informative only */
7907 dd_dev_info(dd, "host access to LCB blocked\n");
7908 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7909 }
7910
7911 /* report any remaining errors */
7912 if (reg)
7913 dd_dev_info(dd, "DCC Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007914 dcc_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007915
7916 if (lcl_reason == 0)
7917 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7918
7919 if (do_bounce) {
7920 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7921 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7922 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7923 }
7924}
7925
7926static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7927{
7928 char buf[96];
7929
7930 dd_dev_info(dd, "LCB Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007931 lcb_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007932}
7933
7934/*
7935 * CCE block DC interrupt. Source is < 8.
7936 */
7937static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7938{
7939 const struct err_reg_info *eri = &dc_errs[source];
7940
7941 if (eri->handler) {
7942 interrupt_clear_down(dd, 0, eri);
7943 } else if (source == 3 /* dc_lbm_int */) {
7944 /*
7945 * This indicates that a parity error has occurred on the
7946 * address/control lines presented to the LBM. The error
7947 * is a single pulse, there is no associated error flag,
7948 * and it is non-maskable. This is because if a parity
 7949 * error occurs on the request, the request is dropped.
7950 * This should never occur, but it is nice to know if it
7951 * ever does.
7952 */
7953 dd_dev_err(dd, "Parity error in DC LBM block\n");
7954 } else {
7955 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7956 }
7957}
7958
7959/*
7960 * TX block send credit interrupt. Source is < 160.
7961 */
7962static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7963{
7964 sc_group_release_update(dd, source);
7965}
7966
7967/*
7968 * TX block SDMA interrupt. Source is < 48.
7969 *
7970 * SDMA interrupts are grouped by type:
7971 *
7972 * 0 - N-1 = SDma
7973 * N - 2N-1 = SDmaProgress
7974 * 2N - 3N-1 = SDmaIdle
7975 */
7976static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7977{
7978 /* what interrupt */
7979 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7980 /* which engine */
7981 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7982
7983#ifdef CONFIG_SDMA_VERBOSITY
7984 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7985 slashstrip(__FILE__), __LINE__, __func__);
7986 sdma_dumpstate(&dd->per_sdma[which]);
7987#endif
7988
7989 if (likely(what < 3 && which < dd->num_sdma)) {
7990 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7991 } else {
7992 /* should not happen */
7993 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7994 }
7995}
7996
7997/*
7998 * RX block receive available interrupt. Source is < 160.
7999 */
8000static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8001{
8002 struct hfi1_ctxtdata *rcd;
8003 char *err_detail;
8004
8005 if (likely(source < dd->num_rcv_contexts)) {
8006 rcd = dd->rcd[source];
8007 if (rcd) {
8008 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008009 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008010 else
8011 handle_user_interrupt(rcd);
8012 return; /* OK */
8013 }
8014 /* received an interrupt, but no rcd */
8015 err_detail = "dataless";
8016 } else {
8017 /* received an interrupt, but are not using that context */
8018 err_detail = "out of range";
8019 }
8020 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008021 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008022}
8023
8024/*
8025 * RX block receive urgent interrupt. Source is < 160.
8026 */
8027static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8028{
8029 struct hfi1_ctxtdata *rcd;
8030 char *err_detail;
8031
8032 if (likely(source < dd->num_rcv_contexts)) {
8033 rcd = dd->rcd[source];
8034 if (rcd) {
8035 /* only pay attention to user urgent interrupts */
8036 if (source >= dd->first_user_ctxt)
8037 handle_user_interrupt(rcd);
8038 return; /* OK */
8039 }
8040 /* received an interrupt, but no rcd */
8041 err_detail = "dataless";
8042 } else {
8043 /* received an interrupt, but are not using that context */
8044 err_detail = "out of range";
8045 }
8046 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008047 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008048}
8049
8050/*
8051 * Reserved range interrupt. Should not be called in normal operation.
8052 */
8053static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8054{
8055 char name[64];
8056
8057 dd_dev_err(dd, "unexpected %s interrupt\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008058 is_reserved_name(name, sizeof(name), source));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008059}
8060
8061static const struct is_table is_table[] = {
Jubin John4d114fd2016-02-14 20:21:43 -08008062/*
8063 * start end
8064 * name func interrupt func
8065 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008066{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8067 is_misc_err_name, is_misc_err_int },
8068{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8069 is_sdma_eng_err_name, is_sdma_eng_err_int },
8070{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8071 is_sendctxt_err_name, is_sendctxt_err_int },
8072{ IS_SDMA_START, IS_SDMA_END,
8073 is_sdma_eng_name, is_sdma_eng_int },
8074{ IS_VARIOUS_START, IS_VARIOUS_END,
8075 is_various_name, is_various_int },
8076{ IS_DC_START, IS_DC_END,
8077 is_dc_name, is_dc_int },
8078{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8079 is_rcv_avail_name, is_rcv_avail_int },
8080{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8081 is_rcv_urgent_name, is_rcv_urgent_int },
8082{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8083 is_send_credit_name, is_send_credit_int},
8084{ IS_RESERVED_START, IS_RESERVED_END,
8085 is_reserved_name, is_reserved_int},
8086};
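
/*
 * The entries above are kept in ascending, non-overlapping order of
 * their half-open [start, end) ranges; is_interrupt() below depends on
 * that ordering so a single "source < entry->end" compare selects the
 * right handler.
 */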
8087
8088/*
8089 * Interrupt source interrupt - called when the given source has an interrupt.
8090 * Source is a bit index into an array of 64-bit integers.
8091 */
8092static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8093{
8094 const struct is_table *entry;
8095
8096 /* avoids a double compare by walking the table in-order */
8097 for (entry = &is_table[0]; entry->is_name; entry++) {
8098 if (source < entry->end) {
8099 trace_hfi1_interrupt(dd, entry, source);
8100 entry->is_int(dd, source - entry->start);
8101 return;
8102 }
8103 }
8104 /* fell off the end */
8105 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8106}
8107
8108/*
8109 * General interrupt handler. This is able to correctly handle
8110 * all interrupts in case INTx is used.
8111 */
8112static irqreturn_t general_interrupt(int irq, void *data)
8113{
8114 struct hfi1_devdata *dd = data;
8115 u64 regs[CCE_NUM_INT_CSRS];
8116 u32 bit;
8117 int i;
8118
8119 this_cpu_inc(*dd->int_counter);
8120
8121 /* phase 1: scan and clear all handled interrupts */
8122 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8123 if (dd->gi_mask[i] == 0) {
8124 regs[i] = 0; /* used later */
8125 continue;
8126 }
8127 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8128 dd->gi_mask[i];
8129 /* only clear if anything is set */
8130 if (regs[i])
8131 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8132 }
8133
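	/*
	 * regs[] is scanned below as one contiguous bitmap of
	 * CCE_NUM_INT_CSRS * 64 bits, so each set bit index is already the
	 * global interrupt source number that is_interrupt() expects.
	 */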
8134 /* phase 2: call the appropriate handler */
8135 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John17fb4f22016-02-14 20:21:52 -08008136 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008137 is_interrupt(dd, bit);
8138 }
8139
8140 return IRQ_HANDLED;
8141}
8142
8143static irqreturn_t sdma_interrupt(int irq, void *data)
8144{
8145 struct sdma_engine *sde = data;
8146 struct hfi1_devdata *dd = sde->dd;
8147 u64 status;
8148
8149#ifdef CONFIG_SDMA_VERBOSITY
8150 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8151 slashstrip(__FILE__), __LINE__, __func__);
8152 sdma_dumpstate(sde);
8153#endif
8154
8155 this_cpu_inc(*dd->int_counter);
8156
8157 /* This read_csr is really bad in the hot path */
8158 status = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008159 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8160 & sde->imask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008161 if (likely(status)) {
8162 /* clear the interrupt(s) */
8163 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008164 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8165 status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008166
8167 /* handle the interrupt(s) */
8168 sdma_engine_interrupt(sde, status);
8169 } else
8170 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008171 sde->this_idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008172
8173 return IRQ_HANDLED;
8174}
8175
8176/*
Dean Luickecd42f82016-02-03 14:35:14 -08008177 * Clear the receive interrupt. Use a read of the interrupt clear CSR
 8178 * to ensure that the write completed. This does NOT guarantee that
8179 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008180 */
8181static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8182{
8183 struct hfi1_devdata *dd = rcd->dd;
8184 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8185
8186 mmiowb(); /* make sure everything before is written */
8187 write_csr(dd, addr, rcd->imask);
8188 /* force the above write on the chip and get a value back */
8189 (void)read_csr(dd, addr);
8190}
8191
8192/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008193void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008194{
8195 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8196}
8197
Dean Luickecd42f82016-02-03 14:35:14 -08008198/*
8199 * Return non-zero if a packet is present.
8200 *
8201 * This routine is called when rechecking for packets after the RcvAvail
8202 * interrupt has been cleared down. First, do a quick check of memory for
8203 * a packet present. If not found, use an expensive CSR read of the context
8204 * tail to determine the actual tail. The CSR read is necessary because there
8205 * is no method to push pending DMAs to memory other than an interrupt and we
8206 * are trying to determine if we need to force an interrupt.
8207 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008208static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8209{
Dean Luickecd42f82016-02-03 14:35:14 -08008210 u32 tail;
8211 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008212
Dean Luickecd42f82016-02-03 14:35:14 -08008213 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8214 present = (rcd->seq_cnt ==
8215 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8216 else /* is RDMA rtail */
8217 present = (rcd->head != get_rcvhdrtail(rcd));
8218
8219 if (present)
8220 return 1;
8221
 8222 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8223 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8224 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008225}
8226
8227/*
8228 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8229 * This routine will try to handle packets immediately (latency), but if
 8230 * it finds too many, it will invoke the thread handler (bandwidth). The
Jubin John16733b82016-02-14 20:20:58 -08008231 * chip receive interrupt is *not* cleared down until this or the thread (if
Dean Luickf4f30031c2015-10-26 10:28:44 -04008232 * invoked) is finished. The intent is to avoid extra interrupts while we
8233 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008234 */
8235static irqreturn_t receive_context_interrupt(int irq, void *data)
8236{
8237 struct hfi1_ctxtdata *rcd = data;
8238 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008239 int disposition;
8240 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008241
8242 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8243 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008244 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008245
Dean Luickf4f30031c2015-10-26 10:28:44 -04008246 /* receive interrupt remains blocked while processing packets */
8247 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008248
Dean Luickf4f30031c2015-10-26 10:28:44 -04008249 /*
8250 * Too many packets were seen while processing packets in this
8251 * IRQ handler. Invoke the handler thread. The receive interrupt
8252 * remains blocked.
8253 */
8254 if (disposition == RCV_PKT_LIMIT)
8255 return IRQ_WAKE_THREAD;
8256
8257 /*
8258 * The packet processor detected no more packets. Clear the receive
 8259 * interrupt and recheck for a packet that may have arrived
8260 * after the previous check and interrupt clear. If a packet arrived,
8261 * force another interrupt.
8262 */
8263 clear_recv_intr(rcd);
8264 present = check_packet_present(rcd);
8265 if (present)
8266 force_recv_intr(rcd);
8267
8268 return IRQ_HANDLED;
8269}
8270
8271/*
8272 * Receive packet thread handler. This expects to be invoked with the
8273 * receive interrupt still blocked.
8274 */
8275static irqreturn_t receive_context_thread(int irq, void *data)
8276{
8277 struct hfi1_ctxtdata *rcd = data;
8278 int present;
8279
8280 /* receive interrupt is still blocked from the IRQ handler */
8281 (void)rcd->do_interrupt(rcd, 1);
8282
8283 /*
8284 * The packet processor will only return if it detected no more
8285 * packets. Hold IRQs here so we can safely clear the interrupt and
8286 * recheck for a packet that may have arrived after the previous
8287 * check and the interrupt clear. If a packet arrived, force another
8288 * interrupt.
8289 */
8290 local_irq_disable();
8291 clear_recv_intr(rcd);
8292 present = check_packet_present(rcd);
8293 if (present)
8294 force_recv_intr(rcd);
8295 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008296
8297 return IRQ_HANDLED;
8298}
8299
8300/* ========================================================================= */
8301
8302u32 read_physical_state(struct hfi1_devdata *dd)
8303{
8304 u64 reg;
8305
8306 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8307 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8308 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8309}
8310
Jim Snowfb9036d2016-01-11 18:32:21 -05008311u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008312{
8313 u64 reg;
8314
8315 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8316 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8317 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8318}
8319
8320static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8321{
8322 u64 reg;
8323
8324 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8325 /* clear current state, set new state */
8326 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8327 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8328 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8329}
8330
8331/*
 8332 * Use the 8051 to read an LCB CSR.
8333 */
8334static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8335{
8336 u32 regno;
8337 int ret;
8338
8339 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8340 if (acquire_lcb_access(dd, 0) == 0) {
8341 *data = read_csr(dd, addr);
8342 release_lcb_access(dd, 0);
8343 return 0;
8344 }
8345 return -EBUSY;
8346 }
8347
8348 /* register is an index of LCB registers: (offset - base) / 8 */
8349 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8350 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8351 if (ret != HCMD_SUCCESS)
8352 return -EBUSY;
8353 return 0;
8354}
8355
8356/*
8357 * Read an LCB CSR. Access may not be in host control, so check.
8358 * Return 0 on success, -EBUSY on failure.
8359 */
8360int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8361{
8362 struct hfi1_pportdata *ppd = dd->pport;
8363
8364 /* if up, go through the 8051 for the value */
8365 if (ppd->host_link_state & HLS_UP)
8366 return read_lcb_via_8051(dd, addr, data);
8367 /* if going up or down, no access */
8368 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8369 return -EBUSY;
8370 /* otherwise, host has access */
8371 *data = read_csr(dd, addr);
8372 return 0;
8373}
8374
8375/*
 8376 * Use the 8051 to write an LCB CSR.
8377 */
8378static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8379{
Dean Luick3bf40d62015-11-06 20:07:04 -05008380 u32 regno;
8381 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008382
Dean Luick3bf40d62015-11-06 20:07:04 -05008383 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8384 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8385 if (acquire_lcb_access(dd, 0) == 0) {
8386 write_csr(dd, addr, data);
8387 release_lcb_access(dd, 0);
8388 return 0;
8389 }
8390 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008391 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008392
8393 /* register is an index of LCB registers: (offset - base) / 8 */
8394 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8395 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8396 if (ret != HCMD_SUCCESS)
8397 return -EBUSY;
8398 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008399}
8400
8401/*
8402 * Write an LCB CSR. Access may not be in host control, so check.
8403 * Return 0 on success, -EBUSY on failure.
8404 */
8405int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8406{
8407 struct hfi1_pportdata *ppd = dd->pport;
8408
8409 /* if up, go through the 8051 for the value */
8410 if (ppd->host_link_state & HLS_UP)
8411 return write_lcb_via_8051(dd, addr, data);
8412 /* if going up or down, no access */
8413 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8414 return -EBUSY;
8415 /* otherwise, host has access */
8416 write_csr(dd, addr, data);
8417 return 0;
8418}
8419
8420/*
8421 * Returns:
8422 * < 0 = Linux error, not able to get access
8423 * > 0 = 8051 command RETURN_CODE
8424 */
8425static int do_8051_command(
8426 struct hfi1_devdata *dd,
8427 u32 type,
8428 u64 in_data,
8429 u64 *out_data)
8430{
8431 u64 reg, completed;
8432 int return_code;
8433 unsigned long flags;
8434 unsigned long timeout;
8435
8436 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8437
8438 /*
8439 * Alternative to holding the lock for a long time:
8440 * - keep busy wait - have other users bounce off
8441 */
8442 spin_lock_irqsave(&dd->dc8051_lock, flags);
8443
8444 /* We can't send any commands to the 8051 if it's in reset */
8445 if (dd->dc_shutdown) {
8446 return_code = -ENODEV;
8447 goto fail;
8448 }
8449
8450 /*
8451 * If an 8051 host command timed out previously, then the 8051 is
8452 * stuck.
8453 *
8454 * On first timeout, attempt to reset and restart the entire DC
8455 * block (including 8051). (Is this too big of a hammer?)
8456 *
8457 * If the 8051 times out a second time, the reset did not bring it
8458 * back to healthy life. In that case, fail any subsequent commands.
8459 */
8460 if (dd->dc8051_timed_out) {
8461 if (dd->dc8051_timed_out > 1) {
8462 dd_dev_err(dd,
8463 "Previous 8051 host command timed out, skipping command %u\n",
8464 type);
8465 return_code = -ENXIO;
8466 goto fail;
8467 }
8468 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8469 dc_shutdown(dd);
8470 dc_start(dd);
8471 spin_lock_irqsave(&dd->dc8051_lock, flags);
8472 }
8473
8474 /*
8475 * If there is no timeout, then the 8051 command interface is
8476 * waiting for a command.
8477 */
8478
8479 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008480 * When writing an LCB CSR, out_data contains the full value to
 8481 * be written, while in_data contains the relative LCB
 8482 * address in 7:0. Do the work here, rather than the caller,
 8483 * of distributing the write data to where it needs to go:
8484 *
8485 * Write data
8486 * 39:00 -> in_data[47:8]
8487 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8488 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8489 */
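	/*
	 * Purely illustrative example: for *out_data = 0x1122334455667788,
	 * bits 39:0 (0x4455667788) are merged into in_data[47:8], bits
	 * 47:40 (0x33) go to DC8051_CFG_EXT_DEV_0.RETURN_CODE, and bits
	 * 63:48 (0x1122) go to DC8051_CFG_EXT_DEV_0.RSP_DATA.
	 */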
8490 if (type == HCMD_WRITE_LCB_CSR) {
8491 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8492 reg = ((((*out_data) >> 40) & 0xff) <<
8493 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8494 | ((((*out_data) >> 48) & 0xffff) <<
8495 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8496 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8497 }
8498
8499 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008500 * Do two writes: the first to stabilize the type and req_data, the
8501 * second to activate.
8502 */
8503 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8504 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8505 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8506 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8507 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8508 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8509 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8510
8511 /* wait for completion, alternate: interrupt */
8512 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8513 while (1) {
8514 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8515 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8516 if (completed)
8517 break;
8518 if (time_after(jiffies, timeout)) {
8519 dd->dc8051_timed_out++;
8520 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8521 if (out_data)
8522 *out_data = 0;
8523 return_code = -ETIMEDOUT;
8524 goto fail;
8525 }
8526 udelay(2);
8527 }
8528
8529 if (out_data) {
8530 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8531 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8532 if (type == HCMD_READ_LCB_CSR) {
8533 /* top 16 bits are in a different register */
8534 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8535 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8536 << (48
8537 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8538 }
8539 }
8540 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8541 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8542 dd->dc8051_timed_out = 0;
8543 /*
8544 * Clear command for next user.
8545 */
8546 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8547
8548fail:
8549 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8550
8551 return return_code;
8552}
8553
8554static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8555{
8556 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8557}
8558
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008559int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8560 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008561{
8562 u64 data;
8563 int ret;
8564
8565 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8566 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8567 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8568 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8569 if (ret != HCMD_SUCCESS) {
8570 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008571 "load 8051 config: field id %d, lane %d, err %d\n",
8572 (int)field_id, (int)lane_id, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008573 }
8574 return ret;
8575}
8576
8577/*
8578 * Read the 8051 firmware "registers". Use the RAM directly. Always
8579 * set the result, even on error.
8580 * Return 0 on success, -errno on failure
8581 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008582int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8583 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008584{
8585 u64 big_data;
8586 u32 addr;
8587 int ret;
8588
8589 /* address start depends on the lane_id */
8590 if (lane_id < 4)
8591 addr = (4 * NUM_GENERAL_FIELDS)
8592 + (lane_id * 4 * NUM_LANE_FIELDS);
8593 else
8594 addr = 0;
8595 addr += field_id * 4;
8596
8597 /* read is in 8-byte chunks, hardware will truncate the address down */
8598 ret = read_8051_data(dd, addr, 8, &big_data);
8599
8600 if (ret == 0) {
8601 /* extract the 4 bytes we want */
8602 if (addr & 0x4)
8603 *result = (u32)(big_data >> 32);
8604 else
8605 *result = (u32)big_data;
8606 } else {
8607 *result = 0;
8608 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008609 __func__, lane_id, field_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008610 }
8611
8612 return ret;
8613}
8614
8615static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8616 u8 continuous)
8617{
8618 u32 frame;
8619
8620 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8621 | power_management << POWER_MANAGEMENT_SHIFT;
8622 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8623 GENERAL_CONFIG, frame);
8624}
8625
8626static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8627 u16 vl15buf, u8 crc_sizes)
8628{
8629 u32 frame;
8630
8631 frame = (u32)vau << VAU_SHIFT
8632 | (u32)z << Z_SHIFT
8633 | (u32)vcu << VCU_SHIFT
8634 | (u32)vl15buf << VL15BUF_SHIFT
8635 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8636 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8637 GENERAL_CONFIG, frame);
8638}
8639
8640static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8641 u8 *flag_bits, u16 *link_widths)
8642{
8643 u32 frame;
8644
8645 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008646 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008647 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8648 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8649 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8650}
8651
8652static int write_vc_local_link_width(struct hfi1_devdata *dd,
8653 u8 misc_bits,
8654 u8 flag_bits,
8655 u16 link_widths)
8656{
8657 u32 frame;
8658
8659 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8660 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8661 | (u32)link_widths << LINK_WIDTH_SHIFT;
8662 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8663 frame);
8664}
8665
8666static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8667 u8 device_rev)
8668{
8669 u32 frame;
8670
8671 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8672 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8673 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8674}
8675
8676static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8677 u8 *device_rev)
8678{
8679 u32 frame;
8680
8681 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8682 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8683 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8684 & REMOTE_DEVICE_REV_MASK;
8685}
8686
8687void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8688{
8689 u32 frame;
8690
8691 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8692 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8693 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8694}
8695
8696static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8697 u8 *continuous)
8698{
8699 u32 frame;
8700
8701 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8702 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8703 & POWER_MANAGEMENT_MASK;
8704 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8705 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8706}
8707
8708static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8709 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8710{
8711 u32 frame;
8712
8713 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8714 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8715 *z = (frame >> Z_SHIFT) & Z_MASK;
8716 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8717 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8718 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8719}
8720
8721static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8722 u8 *remote_tx_rate,
8723 u16 *link_widths)
8724{
8725 u32 frame;
8726
8727 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008728 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008729 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8730 & REMOTE_TX_RATE_MASK;
8731 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8732}
8733
8734static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8735{
8736 u32 frame;
8737
8738 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8739 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8740}
8741
8742static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8743{
8744 u32 frame;
8745
8746 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8747 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8748}
8749
8750static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8751{
8752 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8753}
8754
8755static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8756{
8757 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8758}
8759
8760void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8761{
8762 u32 frame;
8763 int ret;
8764
8765 *link_quality = 0;
8766 if (dd->pport->host_link_state & HLS_UP) {
8767 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008768 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008769 if (ret == 0)
8770 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8771 & LINK_QUALITY_MASK;
8772 }
8773}
8774
8775static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8776{
8777 u32 frame;
8778
8779 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8780 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8781}
8782
Dean Luickfeb831d2016-04-14 08:31:36 -07008783static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8784{
8785 u32 frame;
8786
8787 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8788 *ldr = (frame & 0xff);
8789}
8790
Mike Marciniszyn77241052015-07-30 15:17:43 -04008791static int read_tx_settings(struct hfi1_devdata *dd,
8792 u8 *enable_lane_tx,
8793 u8 *tx_polarity_inversion,
8794 u8 *rx_polarity_inversion,
8795 u8 *max_rate)
8796{
8797 u32 frame;
8798 int ret;
8799
8800 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8801 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8802 & ENABLE_LANE_TX_MASK;
8803 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8804 & TX_POLARITY_INVERSION_MASK;
8805 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8806 & RX_POLARITY_INVERSION_MASK;
8807 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8808 return ret;
8809}
8810
8811static int write_tx_settings(struct hfi1_devdata *dd,
8812 u8 enable_lane_tx,
8813 u8 tx_polarity_inversion,
8814 u8 rx_polarity_inversion,
8815 u8 max_rate)
8816{
8817 u32 frame;
8818
8819 /* no need to mask, all variable sizes match field widths */
8820 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8821 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8822 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8823 | max_rate << MAX_RATE_SHIFT;
8824 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8825}
8826
8827static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8828{
8829 u32 frame, version, prod_id;
8830 int ret, lane;
8831
8832 /* 4 lanes */
8833 for (lane = 0; lane < 4; lane++) {
8834 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8835 if (ret) {
Jubin John17fb4f22016-02-14 20:21:52 -08008836 dd_dev_err(dd,
8837 "Unable to read lane %d firmware details\n",
8838 lane);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008839 continue;
8840 }
8841 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8842 & SPICO_ROM_VERSION_MASK;
8843 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8844 & SPICO_ROM_PROD_ID_MASK;
8845 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008846 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8847 lane, version, prod_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008848 }
8849}
8850
8851/*
8852 * Read an idle LCB message.
8853 *
8854 * Returns 0 on success, -EINVAL on error
8855 */
8856static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8857{
8858 int ret;
8859
Jubin John17fb4f22016-02-14 20:21:52 -08008860 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008861 if (ret != HCMD_SUCCESS) {
8862 dd_dev_err(dd, "read idle message: type %d, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008863 (u32)type, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008864 return -EINVAL;
8865 }
8866 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8867 /* return only the payload as we already know the type */
8868 *data_out >>= IDLE_PAYLOAD_SHIFT;
8869 return 0;
8870}
8871
8872/*
8873 * Read an idle SMA message. To be done in response to a notification from
8874 * the 8051.
8875 *
8876 * Returns 0 on success, -EINVAL on error
8877 */
8878static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8879{
Jubin John17fb4f22016-02-14 20:21:52 -08008880 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8881 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008882}
8883
8884/*
8885 * Send an idle LCB message.
8886 *
8887 * Returns 0 on success, -EINVAL on error
8888 */
8889static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8890{
8891 int ret;
8892
8893 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8894 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8895 if (ret != HCMD_SUCCESS) {
8896 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008897 data, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008898 return -EINVAL;
8899 }
8900 return 0;
8901}
8902
8903/*
8904 * Send an idle SMA message.
8905 *
8906 * Returns 0 on success, -EINVAL on error
8907 */
8908int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8909{
8910 u64 data;
8911
Jubin John17fb4f22016-02-14 20:21:52 -08008912 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8913 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008914 return send_idle_message(dd, data);
8915}
8916
8917/*
8918 * Initialize the LCB then do a quick link up. This may or may not be
8919 * in loopback.
8920 *
8921 * return 0 on success, -errno on error
8922 */
8923static int do_quick_linkup(struct hfi1_devdata *dd)
8924{
8925 u64 reg;
8926 unsigned long timeout;
8927 int ret;
8928
8929 lcb_shutdown(dd, 0);
8930
8931 if (loopback) {
8932 /* LCB_CFG_LOOPBACK.VAL = 2 */
8933 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8934 write_csr(dd, DC_LCB_CFG_LOOPBACK,
Jubin John17fb4f22016-02-14 20:21:52 -08008935 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008936 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8937 }
8938
8939 /* start the LCBs */
8940 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8941 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8942
8943 /* simulator only loopback steps */
8944 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8945 /* LCB_CFG_RUN.EN = 1 */
8946 write_csr(dd, DC_LCB_CFG_RUN,
Jubin John17fb4f22016-02-14 20:21:52 -08008947 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008948
8949 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8950 timeout = jiffies + msecs_to_jiffies(10);
8951 while (1) {
Jubin John17fb4f22016-02-14 20:21:52 -08008952 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008953 if (reg)
8954 break;
8955 if (time_after(jiffies, timeout)) {
8956 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008957 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008958 return -ETIMEDOUT;
8959 }
8960 udelay(2);
8961 }
8962
8963 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
Jubin John17fb4f22016-02-14 20:21:52 -08008964 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008965 }
8966
8967 if (!loopback) {
8968 /*
8969 * When doing quick linkup and not in loopback, both
8970 * sides must be done with LCB set-up before either
8971 * starts the quick linkup. Put a delay here so that
8972 * both sides can be started and have a chance to be
 8973 * done with LCB set-up before resuming.
8974 */
8975 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008976 "Pausing for peer to be finished with LCB set up\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008977 msleep(5000);
Jubin John17fb4f22016-02-14 20:21:52 -08008978 dd_dev_err(dd, "Continuing with quick linkup\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008979 }
8980
8981 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8982 set_8051_lcb_access(dd);
8983
8984 /*
8985 * State "quick" LinkUp request sets the physical link state to
8986 * LinkUp without a verify capability sequence.
8987 * This state is in simulator v37 and later.
8988 */
8989 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8990 if (ret != HCMD_SUCCESS) {
8991 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008992 "%s: set physical link state to quick LinkUp failed with return %d\n",
8993 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008994
8995 set_host_lcb_access(dd);
8996 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8997
8998 if (ret >= 0)
8999 ret = -EINVAL;
9000 return ret;
9001 }
9002
9003 return 0; /* success */
9004}
9005
9006/*
9007 * Set the SerDes to internal loopback mode.
9008 * Returns 0 on success, -errno on error.
9009 */
9010static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9011{
9012 int ret;
9013
9014 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9015 if (ret == HCMD_SUCCESS)
9016 return 0;
9017 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009018 "Set physical link state to SerDes Loopback failed with return %d\n",
9019 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009020 if (ret >= 0)
9021 ret = -EINVAL;
9022 return ret;
9023}
9024
9025/*
9026 * Do all special steps to set up loopback.
9027 */
9028static int init_loopback(struct hfi1_devdata *dd)
9029{
9030 dd_dev_info(dd, "Entering loopback mode\n");
9031
9032 /* all loopbacks should disable self GUID check */
9033 write_csr(dd, DC_DC8051_CFG_MODE,
Jubin John17fb4f22016-02-14 20:21:52 -08009034 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009035
9036 /*
9037 * The simulator has only one loopback option - LCB. Switch
9038 * to that option, which includes quick link up.
9039 *
9040 * Accept all valid loopback values.
9041 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08009042 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9043 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9044 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009045 loopback = LOOPBACK_LCB;
9046 quick_linkup = 1;
9047 return 0;
9048 }
9049
9050 /* handle serdes loopback */
9051 if (loopback == LOOPBACK_SERDES) {
 9052 /* internal serdes loopback needs quick linkup on RTL */
9053 if (dd->icode == ICODE_RTL_SILICON)
9054 quick_linkup = 1;
9055 return set_serdes_loopback_mode(dd);
9056 }
9057
9058 /* LCB loopback - handled at poll time */
9059 if (loopback == LOOPBACK_LCB) {
9060 quick_linkup = 1; /* LCB is always quick linkup */
9061
9062 /* not supported in emulation due to emulation RTL changes */
9063 if (dd->icode == ICODE_FPGA_EMULATION) {
9064 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009065 "LCB loopback not supported in emulation\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009066 return -EINVAL;
9067 }
9068 return 0;
9069 }
9070
9071 /* external cable loopback requires no extra steps */
9072 if (loopback == LOOPBACK_CABLE)
9073 return 0;
9074
9075 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9076 return -EINVAL;
9077}
9078
9079/*
9080 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9081 * used in the Verify Capability link width attribute.
9082 */
9083static u16 opa_to_vc_link_widths(u16 opa_widths)
9084{
9085 int i;
9086 u16 result = 0;
9087
9088 static const struct link_bits {
9089 u16 from;
9090 u16 to;
9091 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08009092 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9093 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9094 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9095 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04009096 };
9097
9098 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9099 if (opa_widths & opa_link_xlate[i].from)
9100 result |= opa_link_xlate[i].to;
9101 }
9102 return result;
9103}
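
/*
 * For example, an enabled-width mask of OPA_LINK_WIDTH_1X |
 * OPA_LINK_WIDTH_4X from the FM maps through the table above to
 * (1 << 0) | (1 << 3) in the Verify Capability link width encoding.
 */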
9104
9105/*
9106 * Set link attributes before moving to polling.
9107 */
9108static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9109{
9110 struct hfi1_devdata *dd = ppd->dd;
9111 u8 enable_lane_tx;
9112 u8 tx_polarity_inversion;
9113 u8 rx_polarity_inversion;
9114 int ret;
9115
9116 /* reset our fabric serdes to clear any lingering problems */
9117 fabric_serdes_reset(dd);
9118
9119 /* set the local tx rate - need to read-modify-write */
9120 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009121 &rx_polarity_inversion, &ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009122 if (ret)
9123 goto set_local_link_attributes_fail;
9124
9125 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9126 /* set the tx rate to the fastest enabled */
9127 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9128 ppd->local_tx_rate = 1;
9129 else
9130 ppd->local_tx_rate = 0;
9131 } else {
9132 /* set the tx rate to all enabled */
9133 ppd->local_tx_rate = 0;
9134 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9135 ppd->local_tx_rate |= 2;
9136 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9137 ppd->local_tx_rate |= 1;
9138 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04009139
9140 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009141 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009142 rx_polarity_inversion, ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009143 if (ret != HCMD_SUCCESS)
9144 goto set_local_link_attributes_fail;
9145
9146 /*
9147 * DC supports continuous updates.
9148 */
Jubin John17fb4f22016-02-14 20:21:52 -08009149 ret = write_vc_local_phy(dd,
9150 0 /* no power management */,
9151 1 /* continuous updates */);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009152 if (ret != HCMD_SUCCESS)
9153 goto set_local_link_attributes_fail;
9154
9155 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9156 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9157 ppd->port_crc_mode_enabled);
9158 if (ret != HCMD_SUCCESS)
9159 goto set_local_link_attributes_fail;
9160
9161 ret = write_vc_local_link_width(dd, 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009162 opa_to_vc_link_widths(
9163 ppd->link_width_enabled));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009164 if (ret != HCMD_SUCCESS)
9165 goto set_local_link_attributes_fail;
9166
9167 /* let peer know who we are */
9168 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9169 if (ret == HCMD_SUCCESS)
9170 return 0;
9171
9172set_local_link_attributes_fail:
9173 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009174 "Failed to set local link attributes, return 0x%x\n",
9175 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009176 return ret;
9177}
9178
9179/*
Easwar Hariharan623bba22016-04-12 11:25:57 -07009180 * Call this to start the link.
9181 * Do not do anything if the link is disabled.
9182 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009183 */
9184int start_link(struct hfi1_pportdata *ppd)
9185{
9186 if (!ppd->link_enabled) {
9187 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009188 "%s: stopping link start because link is disabled\n",
9189 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009190 return 0;
9191 }
9192 if (!ppd->driver_link_ready) {
9193 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009194 "%s: stopping link start because driver is not ready\n",
9195 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009196 return 0;
9197 }
9198
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07009199 /*
9200 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9201 * pkey table can be configured properly if the HFI unit is connected
9202	 * to a switch port with MgmtAllowed=NO
9203 */
9204 clear_full_mgmt_pkey(ppd);
9205
Easwar Hariharan623bba22016-04-12 11:25:57 -07009206 return set_link_state(ppd, HLS_DN_POLL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009207}
9208
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009209static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9210{
9211 struct hfi1_devdata *dd = ppd->dd;
9212 u64 mask;
9213 unsigned long timeout;
9214
9215 /*
9216 * Check for QSFP interrupt for t_init (SFF 8679)
9217 */
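	/*
	 * INT_N is active low: when the pin reads low the module has raised
	 * its interrupt, so clear the latched status and stop waiting;
	 * otherwise poll for up to 2 seconds.
	 */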
9218 timeout = jiffies + msecs_to_jiffies(2000);
9219 while (1) {
9220 mask = read_csr(dd, dd->hfi1_id ?
9221 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9222 if (!(mask & QSFP_HFI0_INT_N)) {
9223 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9224 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9225 break;
9226 }
9227 if (time_after(jiffies, timeout)) {
9228 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9229 __func__);
9230 break;
9231 }
9232 udelay(2);
9233 }
9234}
9235
9236static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9237{
9238 struct hfi1_devdata *dd = ppd->dd;
9239 u64 mask;
9240
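	/* read-modify-write the INT_N bit in this HFI's QSFP mask CSR */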
9241 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9242 if (enable)
9243 mask |= (u64)QSFP_HFI0_INT_N;
9244 else
9245 mask &= ~(u64)QSFP_HFI0_INT_N;
9246 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9247}
9248
9249void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009250{
9251 struct hfi1_devdata *dd = ppd->dd;
9252 u64 mask, qsfp_mask;
9253
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009254 /* Disable INT_N from triggering QSFP interrupts */
9255 set_qsfp_int_n(ppd, 0);
9256
9257 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009258 mask = (u64)QSFP_HFI0_RESET_N;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009259
9260 qsfp_mask = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009261 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009262 qsfp_mask &= ~mask;
9263 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009264 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009265
9266 udelay(10);
9267
9268 qsfp_mask |= mask;
9269 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009270 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009271
9272 wait_for_qsfp_init(ppd);
9273
9274 /*
9275 * Allow INT_N to trigger the QSFP interrupt to watch
9276 * for alarms and warnings
9277 */
9278 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009279}
9280
9281static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9282 u8 *qsfp_interrupt_status)
9283{
9284 struct hfi1_devdata *dd = ppd->dd;
9285
9286 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009287 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9288		dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
9289 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009290
9291 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009292 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9293 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9294 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009295
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009296 /*
9297 * The remaining alarms/warnings don't matter if the link is down.
9298 */
9299 if (ppd->host_link_state & HLS_DOWN)
9300 return 0;
9301
Mike Marciniszyn77241052015-07-30 15:17:43 -04009302 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009303 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9304 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9305 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009306
9307 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009308 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9309 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9310 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009311
9312 /* Byte 2 is vendor specific */
9313
9314 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009315 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9316 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9317 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009318
9319 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009320 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9321 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9322 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009323
9324 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009325 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9326 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9327 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009328
9329 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009330 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9331 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9332 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009333
9334 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009335 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9336 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9337 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009338
9339 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009340 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9341 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9342 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009343
9344 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009345 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9346 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9347 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009348
9349 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009350 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9351 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9352 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009353
9354 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009355 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9356 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9357 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009358
9359 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009360 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9361 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9362 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009363
9364 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009365 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9366 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9367 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009368
9369 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009370 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9371 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9372 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009373
9374 /* Bytes 9-10 and 11-12 are reserved */
9375 /* Bytes 13-15 are vendor specific */
9376
9377 return 0;
9378}
9379
Easwar Hariharan623bba22016-04-12 11:25:57 -07009380/* This routine will only be scheduled if the QSFP module present signal is asserted */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009381void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009382{
9383 struct qsfp_data *qd;
9384 struct hfi1_pportdata *ppd;
9385 struct hfi1_devdata *dd;
9386
9387 qd = container_of(work, struct qsfp_data, qsfp_work);
9388 ppd = qd->ppd;
9389 dd = ppd->dd;
9390
9391 /* Sanity check */
9392 if (!qsfp_mod_present(ppd))
9393 return;
9394
9395 /*
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009396 * Turn DC back on after cable has been re-inserted. Up until
9397 * now, the DC has been in reset to save power.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009398 */
9399 dc_start(dd);
9400
9401 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009402 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009403
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009404 wait_for_qsfp_init(ppd);
9405
9406 /*
9407 * Allow INT_N to trigger the QSFP interrupt to watch
9408 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009409 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009410 set_qsfp_int_n(ppd, 1);
9411
9412 tune_serdes(ppd);
9413
9414 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009415 }
9416
9417 if (qd->check_interrupt_flags) {
9418 u8 qsfp_interrupt_status[16] = {0,};
9419
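		/* read the 16 status/interrupt flag bytes at QSFP offset 6 */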
Dean Luick765a6fa2016-03-05 08:50:06 -08009420 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9421 &qsfp_interrupt_status[0], 16) != 16) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009422 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009423 "%s: Failed to read status of QSFP module\n",
9424 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009425 } else {
9426 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009427
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009428 handle_qsfp_error_conditions(
9429 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009430 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9431 ppd->qsfp_info.check_interrupt_flags = 0;
9432 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08009433 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009434 }
9435 }
9436}
9437
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009438static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009439{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009440 struct hfi1_pportdata *ppd = dd->pport;
9441 u64 qsfp_mask, cce_int_mask;
9442 const int qsfp1_int_smask = QSFP1_INT % 64;
9443 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009444
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009445 /*
9446 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9447 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9448 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9449 * the index of the appropriate CSR in the CCEIntMask CSR array
9450 */
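	/* each CCE_INT_MASK CSR covers 64 interrupt sources, 8 bytes apart */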
9451 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9452 (8 * (QSFP1_INT / 64)));
9453 if (dd->hfi1_id) {
9454 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9455 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9456 cce_int_mask);
9457 } else {
9458 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9459 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9460 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009461 }
9462
Mike Marciniszyn77241052015-07-30 15:17:43 -04009463 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9464 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009465 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9466 qsfp_mask);
9467 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9468 qsfp_mask);
9469
9470 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009471
9472 /* Handle active low nature of INT_N and MODPRST_N pins */
9473 if (qsfp_mod_present(ppd))
9474 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9475 write_csr(dd,
9476 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9477 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009478}
9479
Dean Luickbbdeb332015-12-01 15:38:15 -05009480/*
9481 * Do a one-time initialize of the LCB block.
9482 */
9483static void init_lcb(struct hfi1_devdata *dd)
9484{
Dean Luicka59329d2016-02-03 14:32:31 -08009485 /* simulator does not correctly handle LCB cclk loopback, skip */
9486 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9487 return;
9488
Dean Luickbbdeb332015-12-01 15:38:15 -05009489 /* the DC has been reset earlier in the driver load */
9490
9491 /* set LCB for cclk loopback on the port */
9492 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9493 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9494 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9495 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9496 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9497 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9498 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9499}
9500
Mike Marciniszyn77241052015-07-30 15:17:43 -04009501int bringup_serdes(struct hfi1_pportdata *ppd)
9502{
9503 struct hfi1_devdata *dd = ppd->dd;
9504 u64 guid;
9505 int ret;
9506
9507 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9508 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9509
9510 guid = ppd->guid;
9511 if (!guid) {
9512 if (dd->base_guid)
9513 guid = dd->base_guid + ppd->port - 1;
9514 ppd->guid = guid;
9515 }
9516
Mike Marciniszyn77241052015-07-30 15:17:43 -04009517 /* Set linkinit_reason on power up per OPA spec */
9518 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9519
Dean Luickbbdeb332015-12-01 15:38:15 -05009520 /* one-time init of the LCB */
9521 init_lcb(dd);
9522
Mike Marciniszyn77241052015-07-30 15:17:43 -04009523 if (loopback) {
9524 ret = init_loopback(dd);
9525 if (ret < 0)
9526 return ret;
9527 }
9528
Easwar Hariharan9775a992016-05-12 10:22:39 -07009529 get_port_type(ppd);
9530 if (ppd->port_type == PORT_TYPE_QSFP) {
9531 set_qsfp_int_n(ppd, 0);
9532 wait_for_qsfp_init(ppd);
9533 set_qsfp_int_n(ppd, 1);
9534 }
9535
9536 /*
9537 * Tune the SerDes to a ballpark setting for
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009538 * optimal signal and bit error rate
9539 * Needs to be done before starting the link
9540 */
9541 tune_serdes(ppd);
9542
Mike Marciniszyn77241052015-07-30 15:17:43 -04009543 return start_link(ppd);
9544}
9545
9546void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9547{
9548 struct hfi1_devdata *dd = ppd->dd;
9549
9550 /*
9551	 * Shut down the link and keep it down.  First clear the flag that says the
9552	 * driver wants to allow the link to come up (driver_link_ready).
9553 * Then make sure the link is not automatically restarted
9554 * (link_enabled). Cancel any pending restart. And finally
9555 * go offline.
9556 */
9557 ppd->driver_link_ready = 0;
9558 ppd->link_enabled = 0;
9559
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009560 ppd->offline_disabled_reason =
9561 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009562 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009563 OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009564 set_link_state(ppd, HLS_DN_OFFLINE);
9565
9566 /* disable the port */
9567 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9568}
9569
9570static inline int init_cpu_counters(struct hfi1_devdata *dd)
9571{
9572 struct hfi1_pportdata *ppd;
9573 int i;
9574
9575 ppd = (struct hfi1_pportdata *)(dd + 1);
9576 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009577 ppd->ibport_data.rvp.rc_acks = NULL;
9578 ppd->ibport_data.rvp.rc_qacks = NULL;
9579 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9580 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9581 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9582 if (!ppd->ibport_data.rvp.rc_acks ||
9583 !ppd->ibport_data.rvp.rc_delayed_comp ||
9584 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009585 return -ENOMEM;
9586 }
9587
9588 return 0;
9589}
9590
9591static const char * const pt_names[] = {
9592 "expected",
9593 "eager",
9594 "invalid"
9595};
9596
9597static const char *pt_name(u32 type)
9598{
9599 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9600}
9601
9602/*
9603 * index is the index into the receive array
9604 */
9605void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9606 u32 type, unsigned long pa, u16 order)
9607{
9608 u64 reg;
9609 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9610 (dd->kregbase + RCV_ARRAY));
9611
9612 if (!(dd->flags & HFI1_PRESENT))
9613 goto done;
9614
9615 if (type == PT_INVALID) {
9616 pa = 0;
9617 } else if (type > PT_INVALID) {
9618 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009619 "unexpected receive array type %u for index %u, not handled\n",
9620 type, index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009621 goto done;
9622 }
9623
9624 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9625 pt_name(type), index, pa, (unsigned long)order);
9626
9627#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
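	/*
	 * The receive array entry packs the write-enable bit, the buffer
	 * size order, and the physical address in 4KB units.
	 */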
9628 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9629 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9630 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9631 << RCV_ARRAY_RT_ADDR_SHIFT;
9632 writeq(reg, base + (index * 8));
9633
9634 if (type == PT_EAGER)
9635 /*
9636 * Eager entries are written one-by-one so we have to push them
9637 * after we write the entry.
9638 */
9639 flush_wc();
9640done:
9641 return;
9642}
9643
9644void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9645{
9646 struct hfi1_devdata *dd = rcd->dd;
9647 u32 i;
9648
9649 /* this could be optimized */
9650 for (i = rcd->eager_base; i < rcd->eager_base +
9651 rcd->egrbufs.alloced; i++)
9652 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9653
9654 for (i = rcd->expected_base;
9655 i < rcd->expected_base + rcd->expected_count; i++)
9656 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9657}
9658
9659int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9660 struct hfi1_ctxt_info *kinfo)
9661{
9662 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9663 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9664 return 0;
9665}
9666
9667struct hfi1_message_header *hfi1_get_msgheader(
9668 struct hfi1_devdata *dd, __le32 *rhf_addr)
9669{
9670 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9671
9672 return (struct hfi1_message_header *)
9673 (rhf_addr - dd->rhf_offset + offset);
9674}
9675
9676static const char * const ib_cfg_name_strings[] = {
9677 "HFI1_IB_CFG_LIDLMC",
9678 "HFI1_IB_CFG_LWID_DG_ENB",
9679 "HFI1_IB_CFG_LWID_ENB",
9680 "HFI1_IB_CFG_LWID",
9681 "HFI1_IB_CFG_SPD_ENB",
9682 "HFI1_IB_CFG_SPD",
9683 "HFI1_IB_CFG_RXPOL_ENB",
9684 "HFI1_IB_CFG_LREV_ENB",
9685 "HFI1_IB_CFG_LINKLATENCY",
9686 "HFI1_IB_CFG_HRTBT",
9687 "HFI1_IB_CFG_OP_VLS",
9688 "HFI1_IB_CFG_VL_HIGH_CAP",
9689 "HFI1_IB_CFG_VL_LOW_CAP",
9690 "HFI1_IB_CFG_OVERRUN_THRESH",
9691 "HFI1_IB_CFG_PHYERR_THRESH",
9692 "HFI1_IB_CFG_LINKDEFAULT",
9693 "HFI1_IB_CFG_PKEYS",
9694 "HFI1_IB_CFG_MTU",
9695 "HFI1_IB_CFG_LSTATE",
9696 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9697 "HFI1_IB_CFG_PMA_TICKS",
9698 "HFI1_IB_CFG_PORT"
9699};
9700
9701static const char *ib_cfg_name(int which)
9702{
9703 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9704 return "invalid";
9705 return ib_cfg_name_strings[which];
9706}
9707
9708int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9709{
9710 struct hfi1_devdata *dd = ppd->dd;
9711 int val = 0;
9712
9713 switch (which) {
9714 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9715 val = ppd->link_width_enabled;
9716 break;
9717 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9718 val = ppd->link_width_active;
9719 break;
9720 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9721 val = ppd->link_speed_enabled;
9722 break;
9723 case HFI1_IB_CFG_SPD: /* current Link speed */
9724 val = ppd->link_speed_active;
9725 break;
9726
9727 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9728 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9729 case HFI1_IB_CFG_LINKLATENCY:
9730 goto unimplemented;
9731
9732 case HFI1_IB_CFG_OP_VLS:
9733 val = ppd->vls_operational;
9734 break;
9735 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9736 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9737 break;
9738 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9739 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9740 break;
9741 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9742 val = ppd->overrun_threshold;
9743 break;
9744 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9745 val = ppd->phy_error_threshold;
9746 break;
9747 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9748 val = dd->link_default;
9749 break;
9750
9751 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9752 case HFI1_IB_CFG_PMA_TICKS:
9753 default:
9754unimplemented:
9755 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9756 dd_dev_info(
9757 dd,
9758 "%s: which %s: not implemented\n",
9759 __func__,
9760 ib_cfg_name(which));
9761 break;
9762 }
9763
9764 return val;
9765}
9766
9767/*
9768 * The largest MAD packet size.
9769 */
9770#define MAX_MAD_PACKET 2048
9771
9772/*
9773 * Return the maximum header bytes that can go on the _wire_
9774 * for this device. This count includes the ICRC which is
9775	 * not part of the packet held in memory but is appended
9776 * by the HW.
9777 * This is dependent on the device's receive header entry size.
9778 * HFI allows this to be set per-receive context, but the
9779 * driver presently enforces a global value.
9780 */
9781u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9782{
9783 /*
9784 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9785 * the Receive Header Entry Size minus the PBC (or RHF) size
9786 * plus one DW for the ICRC appended by HW.
9787 *
9788 * dd->rcd[0].rcvhdrqentsize is in DW.
9789	 * We use rcd[0] as all contexts will have the same value. Also,
9790 * the first kernel context would have been allocated by now so
9791 * we are guaranteed a valid value.
9792 */
9793 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9794}
9795
9796/*
9797 * Set Send Length
9798 * @ppd - per port data
9799 *
9800 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9801 * registers compare against LRH.PktLen, so use the max bytes included
9802 * in the LRH.
9803 *
9804 * This routine changes all VL values except VL15, which it maintains at
9805 * the same value.
9806 */
9807static void set_send_length(struct hfi1_pportdata *ppd)
9808{
9809 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009810 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9811 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009812 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9813 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9814 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
Jubin Johnb4ba6632016-06-09 07:51:08 -07009815 int i, j;
Jianxin Xiong44306f12016-04-12 11:30:28 -07009816 u32 thres;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009817
9818 for (i = 0; i < ppd->vls_supported; i++) {
9819 if (dd->vld[i].mtu > maxvlmtu)
9820 maxvlmtu = dd->vld[i].mtu;
9821 if (i <= 3)
9822 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9823 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9824 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9825 else
9826 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9827 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9828 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9829 }
9830 write_csr(dd, SEND_LEN_CHECK0, len1);
9831 write_csr(dd, SEND_LEN_CHECK1, len2);
9832 /* adjust kernel credit return thresholds based on new MTUs */
9833 /* all kernel receive contexts have the same hdrqentsize */
9834 for (i = 0; i < ppd->vls_supported; i++) {
Jianxin Xiong44306f12016-04-12 11:30:28 -07009835 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9836 sc_mtu_to_threshold(dd->vld[i].sc,
9837 dd->vld[i].mtu,
Jubin John17fb4f22016-02-14 20:21:52 -08009838 dd->rcd[0]->rcvhdrqentsize));
Jubin Johnb4ba6632016-06-09 07:51:08 -07009839 for (j = 0; j < INIT_SC_PER_VL; j++)
9840 sc_set_cr_threshold(
9841 pio_select_send_context_vl(dd, j, i),
9842 thres);
Jianxin Xiong44306f12016-04-12 11:30:28 -07009843 }
9844 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9845 sc_mtu_to_threshold(dd->vld[15].sc,
9846 dd->vld[15].mtu,
9847 dd->rcd[0]->rcvhdrqentsize));
9848 sc_set_cr_threshold(dd->vld[15].sc, thres);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009849
9850 /* Adjust maximum MTU for the port in DC */
9851 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9852 (ilog2(maxvlmtu >> 8) + 1);
9853 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9854 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9855 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9856 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9857 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9858}
9859
9860static void set_lidlmc(struct hfi1_pportdata *ppd)
9861{
9862 int i;
9863 u64 sreg = 0;
9864 struct hfi1_devdata *dd = ppd->dd;
9865 u32 mask = ~((1U << ppd->lmc) - 1);
9866 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9867
9868 if (dd->hfi1_snoop.mode_flag)
9869		dd_dev_info(dd, "Set lid/lmc while snooping\n");
9870
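	/*
	 * mask clears the low LMC bits of the LID; those bits are not
	 * compared in the DLID/SLID checks programmed below.
	 */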
9871 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9872 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9873 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
Jubin John8638b772016-02-14 20:19:24 -08009874 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
Mike Marciniszyn77241052015-07-30 15:17:43 -04009875 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9876 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9877 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9878
9879 /*
9880 * Iterate over all the send contexts and set their SLID check
9881 */
9882 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9883 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9884 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9885 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9886
9887 for (i = 0; i < dd->chip_send_contexts; i++) {
9888 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9889 i, (u32)sreg);
9890 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9891 }
9892
9893 /* Now we have to do the same thing for the sdma engines */
9894 sdma_update_lmc(dd, mask, ppd->lid);
9895}
9896
9897static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9898{
9899 unsigned long timeout;
9900 u32 curr_state;
9901
9902 timeout = jiffies + msecs_to_jiffies(msecs);
9903 while (1) {
9904 curr_state = read_physical_state(dd);
9905 if (curr_state == state)
9906 break;
9907 if (time_after(jiffies, timeout)) {
9908 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009909 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9910 state, curr_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009911 return -ETIMEDOUT;
9912 }
9913 usleep_range(1950, 2050); /* sleep 2ms-ish */
9914 }
9915
9916 return 0;
9917}
9918
9919/*
9920 * Helper for set_link_state(). Do not call except from that routine.
9921 * Expects ppd->hls_mutex to be held.
9922 *
9923 * @rem_reason value to be sent to the neighbor
9924 *
9925 * LinkDownReasons only set if transition succeeds.
9926 */
9927static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9928{
9929 struct hfi1_devdata *dd = ppd->dd;
9930 u32 pstate, previous_state;
9931 u32 last_local_state;
9932 u32 last_remote_state;
9933 int ret;
9934 int do_transition;
9935 int do_wait;
9936
9937 previous_state = ppd->host_link_state;
9938 ppd->host_link_state = HLS_GOING_OFFLINE;
9939 pstate = read_physical_state(dd);
9940 if (pstate == PLS_OFFLINE) {
9941 do_transition = 0; /* in right state */
9942 do_wait = 0; /* ...no need to wait */
9943 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9944 do_transition = 0; /* in an offline transient state */
9945 do_wait = 1; /* ...wait for it to settle */
9946 } else {
9947 do_transition = 1; /* need to move to offline */
9948 do_wait = 1; /* ...will need to wait */
9949 }
9950
9951 if (do_transition) {
9952 ret = set_physical_link_state(dd,
Harish Chegondibf640092016-03-05 08:49:29 -08009953 (rem_reason << 8) | PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009954
9955 if (ret != HCMD_SUCCESS) {
9956 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009957 "Failed to transition to Offline link state, return %d\n",
9958 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009959 return -EINVAL;
9960 }
Bryan Morgana9c05e32016-02-03 14:30:49 -08009961 if (ppd->offline_disabled_reason ==
9962 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -04009963 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -08009964 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009965 }
9966
9967 if (do_wait) {
9968 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -04009969 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009970 if (ret < 0)
9971 return ret;
9972 }
9973
9974 /* make sure the logical state is also down */
9975 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9976
9977 /*
9978 * Now in charge of LCB - must be after the physical state is
9979 * offline.quiet and before host_link_state is changed.
9980 */
9981 set_host_lcb_access(dd);
9982 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9983 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9984
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009985 if (ppd->port_type == PORT_TYPE_QSFP &&
9986 ppd->qsfp_info.limiting_active &&
9987 qsfp_mod_present(ppd)) {
Dean Luick765a6fa2016-03-05 08:50:06 -08009988 int ret;
9989
9990 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
9991 if (ret == 0) {
9992 set_qsfp_tx(ppd, 0);
9993 release_chip_resource(dd, qsfp_resource(dd));
9994 } else {
9995 /* not fatal, but should warn */
9996 dd_dev_err(dd,
9997 "Unable to acquire lock to turn off QSFP TX\n");
9998 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009999 }
10000
Mike Marciniszyn77241052015-07-30 15:17:43 -040010001 /*
10002 * The LNI has a mandatory wait time after the physical state
10003 * moves to Offline.Quiet. The wait time may be different
10004 * depending on how the link went down. The 8051 firmware
10005 * will observe the needed wait time and only move to ready
10006 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -050010007 * is 6s, so wait that long and then at least 0.5s more for
10008 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -040010009 */
Dean Luick05087f3b2015-12-01 15:38:16 -050010010 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010011 if (ret) {
10012 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010013 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010014 /* state is really offline, so make it so */
10015 ppd->host_link_state = HLS_DN_OFFLINE;
10016 return ret;
10017 }
10018
10019 /*
10020 * The state is now offline and the 8051 is ready to accept host
10021 * requests.
10022 * - change our state
10023 * - notify others if we were previously in a linkup state
10024 */
10025 ppd->host_link_state = HLS_DN_OFFLINE;
10026 if (previous_state & HLS_UP) {
10027 /* went down while link was up */
10028 handle_linkup_change(dd, 0);
10029 } else if (previous_state
10030 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10031 /* went down while attempting link up */
10032 /* byte 1 of last_*_state is the failure reason */
10033 read_last_local_state(dd, &last_local_state);
10034 read_last_remote_state(dd, &last_remote_state);
10035 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010036 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
10037 last_local_state, last_remote_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010038 }
10039
10040 /* the active link width (downgrade) is 0 on link down */
10041 ppd->link_width_active = 0;
10042 ppd->link_width_downgrade_tx_active = 0;
10043 ppd->link_width_downgrade_rx_active = 0;
10044 ppd->current_egress_rate = 0;
10045 return 0;
10046}
10047
10048/* return the link state name */
10049static const char *link_state_name(u32 state)
10050{
10051 const char *name;
10052 int n = ilog2(state);
10053 static const char * const names[] = {
10054 [__HLS_UP_INIT_BP] = "INIT",
10055 [__HLS_UP_ARMED_BP] = "ARMED",
10056 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10057 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10058 [__HLS_DN_POLL_BP] = "POLL",
10059 [__HLS_DN_DISABLE_BP] = "DISABLE",
10060 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10061 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10062 [__HLS_GOING_UP_BP] = "GOING_UP",
10063 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10064 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10065 };
10066
10067 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10068 return name ? name : "unknown";
10069}
10070
10071/* return the link state reason name */
10072static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10073{
10074 if (state == HLS_UP_INIT) {
10075 switch (ppd->linkinit_reason) {
10076 case OPA_LINKINIT_REASON_LINKUP:
10077 return "(LINKUP)";
10078 case OPA_LINKINIT_REASON_FLAPPING:
10079 return "(FLAPPING)";
10080 case OPA_LINKINIT_OUTSIDE_POLICY:
10081 return "(OUTSIDE_POLICY)";
10082 case OPA_LINKINIT_QUARANTINED:
10083 return "(QUARANTINED)";
10084 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10085 return "(INSUFIC_CAPABILITY)";
10086 default:
10087 break;
10088 }
10089 }
10090 return "";
10091}
10092
10093/*
10094 * driver_physical_state - convert the driver's notion of a port's
10095 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10096 * Return -1 (converted to a u32) to indicate error.
10097 */
10098u32 driver_physical_state(struct hfi1_pportdata *ppd)
10099{
10100 switch (ppd->host_link_state) {
10101 case HLS_UP_INIT:
10102 case HLS_UP_ARMED:
10103 case HLS_UP_ACTIVE:
10104 return IB_PORTPHYSSTATE_LINKUP;
10105 case HLS_DN_POLL:
10106 return IB_PORTPHYSSTATE_POLLING;
10107 case HLS_DN_DISABLE:
10108 return IB_PORTPHYSSTATE_DISABLED;
10109 case HLS_DN_OFFLINE:
10110 return OPA_PORTPHYSSTATE_OFFLINE;
10111 case HLS_VERIFY_CAP:
10112 return IB_PORTPHYSSTATE_POLLING;
10113 case HLS_GOING_UP:
10114 return IB_PORTPHYSSTATE_POLLING;
10115 case HLS_GOING_OFFLINE:
10116 return OPA_PORTPHYSSTATE_OFFLINE;
10117 case HLS_LINK_COOLDOWN:
10118 return OPA_PORTPHYSSTATE_OFFLINE;
10119 case HLS_DN_DOWNDEF:
10120 default:
10121 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10122 ppd->host_link_state);
10123 return -1;
10124 }
10125}
10126
10127/*
10128 * driver_logical_state - convert the driver's notion of a port's
10129 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10130 * (converted to a u32) to indicate error.
10131 */
10132u32 driver_logical_state(struct hfi1_pportdata *ppd)
10133{
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -070010134 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010135 return IB_PORT_DOWN;
10136
10137 switch (ppd->host_link_state & HLS_UP) {
10138 case HLS_UP_INIT:
10139 return IB_PORT_INIT;
10140 case HLS_UP_ARMED:
10141 return IB_PORT_ARMED;
10142 case HLS_UP_ACTIVE:
10143 return IB_PORT_ACTIVE;
10144 default:
10145 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10146 ppd->host_link_state);
10147 return -1;
10148 }
10149}
10150
10151void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10152 u8 neigh_reason, u8 rem_reason)
10153{
10154 if (ppd->local_link_down_reason.latest == 0 &&
10155 ppd->neigh_link_down_reason.latest == 0) {
10156 ppd->local_link_down_reason.latest = lcl_reason;
10157 ppd->neigh_link_down_reason.latest = neigh_reason;
10158 ppd->remote_link_down_reason = rem_reason;
10159 }
10160}
10161
10162/*
10163 * Change the physical and/or logical link state.
10164 *
10165 * Do not call this routine while inside an interrupt. It contains
10166 * calls to routines that can take multiple seconds to finish.
10167 *
10168 * Returns 0 on success, -errno on failure.
10169 */
10170int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10171{
10172 struct hfi1_devdata *dd = ppd->dd;
10173 struct ib_event event = {.device = NULL};
10174 int ret1, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010175 int orig_new_state, poll_bounce;
10176
10177 mutex_lock(&ppd->hls_lock);
10178
10179 orig_new_state = state;
10180 if (state == HLS_DN_DOWNDEF)
10181 state = dd->link_default;
10182
10183 /* interpret poll -> poll as a link bounce */
Jubin Johnd0d236e2016-02-14 20:20:15 -080010184 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10185 state == HLS_DN_POLL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010186
10187 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -080010188 link_state_name(ppd->host_link_state),
10189 link_state_name(orig_new_state),
10190 poll_bounce ? "(bounce) " : "",
10191 link_state_reason_name(ppd, state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010192
Mike Marciniszyn77241052015-07-30 15:17:43 -040010193 /*
10194 * If we're going to a (HLS_*) link state that implies the logical
10195 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10196 * reset is_sm_config_started to 0.
10197 */
10198 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10199 ppd->is_sm_config_started = 0;
10200
10201 /*
10202 * Do nothing if the states match. Let a poll to poll link bounce
10203 * go through.
10204 */
10205 if (ppd->host_link_state == state && !poll_bounce)
10206 goto done;
10207
10208 switch (state) {
10209 case HLS_UP_INIT:
Jubin Johnd0d236e2016-02-14 20:20:15 -080010210 if (ppd->host_link_state == HLS_DN_POLL &&
10211 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010212 /*
10213 * Quick link up jumps from polling to here.
10214 *
10215 * Whether in normal or loopback mode, the
10216 * simulator jumps from polling to link up.
10217 * Accept that here.
10218 */
Jubin John17fb4f22016-02-14 20:21:52 -080010219 /* OK */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010220 } else if (ppd->host_link_state != HLS_GOING_UP) {
10221 goto unexpected;
10222 }
10223
10224 ppd->host_link_state = HLS_UP_INIT;
10225 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10226 if (ret) {
10227 /* logical state didn't change, stay at going_up */
10228 ppd->host_link_state = HLS_GOING_UP;
10229 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010230 "%s: logical state did not change to INIT\n",
10231 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010232 } else {
10233 /* clear old transient LINKINIT_REASON code */
10234 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10235 ppd->linkinit_reason =
10236 OPA_LINKINIT_REASON_LINKUP;
10237
10238 /* enable the port */
10239 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10240
10241 handle_linkup_change(dd, 1);
10242 }
10243 break;
10244 case HLS_UP_ARMED:
10245 if (ppd->host_link_state != HLS_UP_INIT)
10246 goto unexpected;
10247
10248 ppd->host_link_state = HLS_UP_ARMED;
10249 set_logical_state(dd, LSTATE_ARMED);
10250 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10251 if (ret) {
10252 /* logical state didn't change, stay at init */
10253 ppd->host_link_state = HLS_UP_INIT;
10254 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010255 "%s: logical state did not change to ARMED\n",
10256 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010257 }
10258 /*
10259 * The simulator does not currently implement SMA messages,
10260 * so neighbor_normal is not set. Set it here when we first
10261 * move to Armed.
10262 */
10263 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10264 ppd->neighbor_normal = 1;
10265 break;
10266 case HLS_UP_ACTIVE:
10267 if (ppd->host_link_state != HLS_UP_ARMED)
10268 goto unexpected;
10269
10270 ppd->host_link_state = HLS_UP_ACTIVE;
10271 set_logical_state(dd, LSTATE_ACTIVE);
10272 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10273 if (ret) {
10274 /* logical state didn't change, stay at armed */
10275 ppd->host_link_state = HLS_UP_ARMED;
10276 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010277 "%s: logical state did not change to ACTIVE\n",
10278 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010279 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010280 /* tell all engines to go running */
10281 sdma_all_running(dd);
10282
10283			/* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010284 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010285 event.element.port_num = ppd->port;
10286 event.event = IB_EVENT_PORT_ACTIVE;
10287 }
10288 break;
10289 case HLS_DN_POLL:
10290 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10291 ppd->host_link_state == HLS_DN_OFFLINE) &&
10292 dd->dc_shutdown)
10293 dc_start(dd);
10294 /* Hand LED control to the DC */
10295 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10296
10297 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10298 u8 tmp = ppd->link_enabled;
10299
10300 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10301 if (ret) {
10302 ppd->link_enabled = tmp;
10303 break;
10304 }
10305 ppd->remote_link_down_reason = 0;
10306
10307 if (ppd->driver_link_ready)
10308 ppd->link_enabled = 1;
10309 }
10310
Jim Snowfb9036d2016-01-11 18:32:21 -050010311 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010312 ret = set_local_link_attributes(ppd);
10313 if (ret)
10314 break;
10315
10316 ppd->port_error_action = 0;
10317 ppd->host_link_state = HLS_DN_POLL;
10318
10319 if (quick_linkup) {
10320 /* quick linkup does not go into polling */
10321 ret = do_quick_linkup(dd);
10322 } else {
10323 ret1 = set_physical_link_state(dd, PLS_POLLING);
10324 if (ret1 != HCMD_SUCCESS) {
10325 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010326 "Failed to transition to Polling link state, return 0x%x\n",
10327 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010328 ret = -EINVAL;
10329 }
10330 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010331 ppd->offline_disabled_reason =
10332 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010333 /*
10334 * If an error occurred above, go back to offline. The
10335 * caller may reschedule another attempt.
10336 */
10337 if (ret)
10338 goto_offline(ppd, 0);
10339 break;
10340 case HLS_DN_DISABLE:
10341 /* link is disabled */
10342 ppd->link_enabled = 0;
10343
10344 /* allow any state to transition to disabled */
10345
10346 /* must transition to offline first */
10347 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10348 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10349 if (ret)
10350 break;
10351 ppd->remote_link_down_reason = 0;
10352 }
10353
10354 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10355 if (ret1 != HCMD_SUCCESS) {
10356 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010357 "Failed to transition to Disabled link state, return 0x%x\n",
10358 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010359 ret = -EINVAL;
10360 break;
10361 }
10362 ppd->host_link_state = HLS_DN_DISABLE;
10363 dc_shutdown(dd);
10364 break;
10365 case HLS_DN_OFFLINE:
10366 if (ppd->host_link_state == HLS_DN_DISABLE)
10367 dc_start(dd);
10368
10369 /* allow any state to transition to offline */
10370 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10371 if (!ret)
10372 ppd->remote_link_down_reason = 0;
10373 break;
10374 case HLS_VERIFY_CAP:
10375 if (ppd->host_link_state != HLS_DN_POLL)
10376 goto unexpected;
10377 ppd->host_link_state = HLS_VERIFY_CAP;
10378 break;
10379 case HLS_GOING_UP:
10380 if (ppd->host_link_state != HLS_VERIFY_CAP)
10381 goto unexpected;
10382
10383 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10384 if (ret1 != HCMD_SUCCESS) {
10385 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010386 "Failed to transition to link up state, return 0x%x\n",
10387 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010388 ret = -EINVAL;
10389 break;
10390 }
10391 ppd->host_link_state = HLS_GOING_UP;
10392 break;
10393
10394 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10395 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10396 default:
10397 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010398 __func__, state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010399 ret = -EINVAL;
10400 break;
10401 }
10402
Mike Marciniszyn77241052015-07-30 15:17:43 -040010403 goto done;
10404
10405unexpected:
10406 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010407 __func__, link_state_name(ppd->host_link_state),
10408 link_state_name(state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010409 ret = -EINVAL;
10410
10411done:
10412 mutex_unlock(&ppd->hls_lock);
10413
10414 if (event.device)
10415 ib_dispatch_event(&event);
10416
10417 return ret;
10418}
10419
10420int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10421{
10422 u64 reg;
10423 int ret = 0;
10424
10425 switch (which) {
10426 case HFI1_IB_CFG_LIDLMC:
10427 set_lidlmc(ppd);
10428 break;
10429 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10430 /*
10431 * The VL Arbitrator high limit is sent in units of 4k
10432 * bytes, while HFI stores it in units of 64 bytes.
10433 */
Jubin John8638b772016-02-14 20:19:24 -080010434 val *= 4096 / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010435 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10436 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10437 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10438 break;
10439 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10440 /* HFI only supports POLL as the default link down state */
10441 if (val != HLS_DN_POLL)
10442 ret = -EINVAL;
10443 break;
10444 case HFI1_IB_CFG_OP_VLS:
10445 if (ppd->vls_operational != val) {
10446 ppd->vls_operational = val;
10447 if (!ppd->port)
10448 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010449 }
10450 break;
10451 /*
10452 * For link width, link width downgrade, and speed enable, always AND
10453 * the setting with what is actually supported. This has two benefits.
10454 * First, enabled can't have unsupported values, no matter what the
10455 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10456 * "fill in with your supported value" have all the bits in the
10457 * field set, so simply ANDing with supported has the desired result.
10458 */
10459 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10460 ppd->link_width_enabled = val & ppd->link_width_supported;
10461 break;
10462 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10463 ppd->link_width_downgrade_enabled =
10464 val & ppd->link_width_downgrade_supported;
10465 break;
10466 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10467 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10468 break;
10469 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10470 /*
10471 * HFI does not follow IB specs, save this value
10472 * so we can report it, if asked.
10473 */
10474 ppd->overrun_threshold = val;
10475 break;
10476 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10477 /*
10478 * HFI does not follow IB specs, save this value
10479 * so we can report it, if asked.
10480 */
10481 ppd->phy_error_threshold = val;
10482 break;
10483
10484 case HFI1_IB_CFG_MTU:
10485 set_send_length(ppd);
10486 break;
10487
10488 case HFI1_IB_CFG_PKEYS:
10489 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10490 set_partition_keys(ppd);
10491 break;
10492
10493 default:
10494 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10495 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010496 "%s: which %s, val 0x%x: not implemented\n",
10497 __func__, ib_cfg_name(which), val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010498 break;
10499 }
10500 return ret;
10501}
10502
10503/* begin functions related to vl arbitration table caching */
10504static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10505{
10506 int i;
10507
10508 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10509 VL_ARB_LOW_PRIO_TABLE_SIZE);
10510 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10511 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10512
10513 /*
10514 * Note that we always return values directly from the
10515 * 'vl_arb_cache' (and do no CSR reads) in response to a
10516 * 'Get(VLArbTable)'. This is obviously correct after a
10517 * 'Set(VLArbTable)', since the cache will then be up to
10518 * date. But it's also correct prior to any 'Set(VLArbTable)'
10519 * since then both the cache, and the relevant h/w registers
10520 * will be zeroed.
10521 */
10522
10523 for (i = 0; i < MAX_PRIO_TABLE; i++)
10524 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10525}
10526
10527/*
10528 * vl_arb_lock_cache
10529 *
10530 * All other vl_arb_* functions should be called only after locking
10531 * the cache.
10532 */
10533static inline struct vl_arb_cache *
10534vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10535{
10536 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10537 return NULL;
10538 spin_lock(&ppd->vl_arb_cache[idx].lock);
10539 return &ppd->vl_arb_cache[idx];
10540}
10541
10542static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10543{
10544 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10545}
10546
10547static void vl_arb_get_cache(struct vl_arb_cache *cache,
10548 struct ib_vl_weight_elem *vl)
10549{
10550 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10551}
10552
10553static void vl_arb_set_cache(struct vl_arb_cache *cache,
10554 struct ib_vl_weight_elem *vl)
10555{
10556 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10557}
10558
10559static int vl_arb_match_cache(struct vl_arb_cache *cache,
10560 struct ib_vl_weight_elem *vl)
10561{
10562 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10563}
Jubin Johnf4d507c2016-02-14 20:20:25 -080010564
Mike Marciniszyn77241052015-07-30 15:17:43 -040010565/* end functions related to vl arbitration table caching */
10566
10567static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10568 u32 size, struct ib_vl_weight_elem *vl)
10569{
10570 struct hfi1_devdata *dd = ppd->dd;
10571 u64 reg;
10572 unsigned int i, is_up = 0;
10573 int drain, ret = 0;
10574
10575 mutex_lock(&ppd->hls_lock);
10576
10577 if (ppd->host_link_state & HLS_UP)
10578 is_up = 1;
10579
10580 drain = !is_ax(dd) && is_up;
10581
10582 if (drain)
10583 /*
10584 * Before adjusting VL arbitration weights, empty per-VL
10585 * FIFOs, otherwise a packet whose VL weight is being
10586 * set to 0 could get stuck in a FIFO with no chance to
10587 * egress.
10588 */
10589 ret = stop_drain_data_vls(dd);
10590
10591 if (ret) {
10592 dd_dev_err(
10593 dd,
10594 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10595 __func__);
10596 goto err;
10597 }
10598
10599 for (i = 0; i < size; i++, vl++) {
10600 /*
10601 * NOTE: The low priority shift and mask are used here, but
10602 * they are the same for both the low and high registers.
10603 */
10604 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10605 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10606 | (((u64)vl->weight
10607 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10608 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10609 write_csr(dd, target + (i * 8), reg);
10610 }
10611 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10612
10613 if (drain)
10614 open_fill_data_vls(dd); /* reopen all VLs */
10615
10616err:
10617 mutex_unlock(&ppd->hls_lock);
10618
10619 return ret;
10620}
10621
10622/*
10623 * Read one credit merge VL register.
10624 */
10625static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10626 struct vl_limit *vll)
10627{
10628 u64 reg = read_csr(dd, csr);
10629
10630 vll->dedicated = cpu_to_be16(
10631 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10632 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10633 vll->shared = cpu_to_be16(
10634 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10635 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10636}
10637
10638/*
10639 * Read the current credit merge limits.
10640 */
10641static int get_buffer_control(struct hfi1_devdata *dd,
10642 struct buffer_control *bc, u16 *overall_limit)
10643{
10644 u64 reg;
10645 int i;
10646
10647 /* not all entries are filled in */
10648 memset(bc, 0, sizeof(*bc));
10649
10650 /* OPA and HFI have a 1-1 mapping */
10651 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080010652 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010653
10654 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10655 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10656
10657 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10658 bc->overall_shared_limit = cpu_to_be16(
10659 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10660 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10661 if (overall_limit)
10662 *overall_limit = (reg
10663 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10664 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10665 return sizeof(struct buffer_control);
10666}
10667
10668static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10669{
10670 u64 reg;
10671 int i;
10672
10673 /* each register contains 16 SC->VLnt mappings, 4 bits each */
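	/* low nibble of each byte maps the even SC, high nibble the odd SC */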
10674 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10675 for (i = 0; i < sizeof(u64); i++) {
10676 u8 byte = *(((u8 *)&reg) + i);
10677
10678 dp->vlnt[2 * i] = byte & 0xf;
10679 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10680 }
10681
10682 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10683 for (i = 0; i < sizeof(u64); i++) {
10684 u8 byte = *(((u8 *)&reg) + i);
10685
10686 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10687 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10688 }
10689 return sizeof(struct sc2vlnt);
10690}
10691
10692static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10693 struct ib_vl_weight_elem *vl)
10694{
10695 unsigned int i;
10696
10697 for (i = 0; i < nelems; i++, vl++) {
10698 vl->vl = 0xf;
10699 vl->weight = 0;
10700 }
10701}
10702
10703static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10704{
10705 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
Jubin John17fb4f22016-02-14 20:21:52 -080010706 DC_SC_VL_VAL(15_0,
10707 0, dp->vlnt[0] & 0xf,
10708 1, dp->vlnt[1] & 0xf,
10709 2, dp->vlnt[2] & 0xf,
10710 3, dp->vlnt[3] & 0xf,
10711 4, dp->vlnt[4] & 0xf,
10712 5, dp->vlnt[5] & 0xf,
10713 6, dp->vlnt[6] & 0xf,
10714 7, dp->vlnt[7] & 0xf,
10715 8, dp->vlnt[8] & 0xf,
10716 9, dp->vlnt[9] & 0xf,
10717 10, dp->vlnt[10] & 0xf,
10718 11, dp->vlnt[11] & 0xf,
10719 12, dp->vlnt[12] & 0xf,
10720 13, dp->vlnt[13] & 0xf,
10721 14, dp->vlnt[14] & 0xf,
10722 15, dp->vlnt[15] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010723 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
Jubin John17fb4f22016-02-14 20:21:52 -080010724 DC_SC_VL_VAL(31_16,
10725 16, dp->vlnt[16] & 0xf,
10726 17, dp->vlnt[17] & 0xf,
10727 18, dp->vlnt[18] & 0xf,
10728 19, dp->vlnt[19] & 0xf,
10729 20, dp->vlnt[20] & 0xf,
10730 21, dp->vlnt[21] & 0xf,
10731 22, dp->vlnt[22] & 0xf,
10732 23, dp->vlnt[23] & 0xf,
10733 24, dp->vlnt[24] & 0xf,
10734 25, dp->vlnt[25] & 0xf,
10735 26, dp->vlnt[26] & 0xf,
10736 27, dp->vlnt[27] & 0xf,
10737 28, dp->vlnt[28] & 0xf,
10738 29, dp->vlnt[29] & 0xf,
10739 30, dp->vlnt[30] & 0xf,
10740 31, dp->vlnt[31] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010741}
10742
10743static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10744 u16 limit)
10745{
10746 if (limit != 0)
10747 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010748 what, (int)limit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010749}
10750
10751/* change only the shared limit portion of SendCmGlobalCredit */
10752static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10753{
10754 u64 reg;
10755
10756 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10757 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10758 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10759 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10760}
10761
10762/* change only the total credit limit portion of SendCmGlobalCredit */
10763static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10764{
10765 u64 reg;
10766
10767 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10768 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10769 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10770 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10771}
10772
10773/* set the given per-VL shared limit */
10774static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10775{
10776 u64 reg;
10777 u32 addr;
10778
10779 if (vl < TXE_NUM_DATA_VL)
10780 addr = SEND_CM_CREDIT_VL + (8 * vl);
10781 else
10782 addr = SEND_CM_CREDIT_VL15;
10783
10784 reg = read_csr(dd, addr);
10785 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10786 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10787 write_csr(dd, addr, reg);
10788}
10789
10790/* set the given per-VL dedicated limit */
10791static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10792{
10793 u64 reg;
10794 u32 addr;
10795
10796 if (vl < TXE_NUM_DATA_VL)
10797 addr = SEND_CM_CREDIT_VL + (8 * vl);
10798 else
10799 addr = SEND_CM_CREDIT_VL15;
10800
10801 reg = read_csr(dd, addr);
10802 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10803 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10804 write_csr(dd, addr, reg);
10805}
10806
10807/* spin until the given per-VL status mask bits clear */
10808static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10809 const char *which)
10810{
10811 unsigned long timeout;
10812 u64 reg;
10813
10814 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10815 while (1) {
10816 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10817
10818 if (reg == 0)
10819 return; /* success */
10820 if (time_after(jiffies, timeout))
10821 break; /* timed out */
10822 udelay(1);
10823 }
10824
10825 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010826 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10827 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010828 /*
10829 * If this occurs, it is likely there was a credit loss on the link.
10830 * The only recovery from that is a link bounce.
10831 */
10832 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010833 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010834}
10835
10836/*
10837 * The number of credits on the VLs may be changed while everything
10838 * is "live", but the following algorithm must be followed due to
10839 * how the hardware is actually implemented. In particular,
10840 * Return_Credit_Status[] is the only correct status check.
10841 *
10842 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10843 * set Global_Shared_Credit_Limit = 0
10844 * use_all_vl = 1
10845 * mask0 = all VLs that are changing either dedicated or shared limits
10846 * set Shared_Limit[mask0] = 0
10847 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10848 * if (changing any dedicated limit)
10849 * mask1 = all VLs that are lowering dedicated limits
10850 * lower Dedicated_Limit[mask1]
10851 * spin until Return_Credit_Status[mask1] == 0
10852 * raise Dedicated_Limits
10853 * raise Shared_Limits
10854 * raise Global_Shared_Credit_Limit
10855 *
10856 * lower = if the new limit is lower, set the limit to the new value
10857 * raise = if the new limit is higher than the current value (may be changed
10858 * earlier in the algorithm), set the new limit to the new value
10859 */
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010860int set_buffer_control(struct hfi1_pportdata *ppd,
10861 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040010862{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010863 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010864 u64 changing_mask, ld_mask, stat_mask;
10865 int change_count;
10866 int i, use_all_mask;
10867 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010868 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010869 /*
10870	 * A0: the variable any_shared_limit_changing below (and its use in the
10871	 * algorithm above) is only needed for A0; remove it when removing A0 support.
10872 */
10873 int any_shared_limit_changing;
10874 struct buffer_control cur_bc;
10875 u8 changing[OPA_MAX_VLS];
10876 u8 lowering_dedicated[OPA_MAX_VLS];
10877 u16 cur_total;
10878 u32 new_total = 0;
10879 const u64 all_mask =
10880 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10881 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10882 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10883 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10884 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10885 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10886 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10887 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10888 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10889
10890#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10891#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10892
Mike Marciniszyn77241052015-07-30 15:17:43 -040010893 /* find the new total credits, do sanity check on unused VLs */
10894 for (i = 0; i < OPA_MAX_VLS; i++) {
10895 if (valid_vl(i)) {
10896 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10897 continue;
10898 }
10899 nonzero_msg(dd, i, "dedicated",
Jubin John17fb4f22016-02-14 20:21:52 -080010900 be16_to_cpu(new_bc->vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010901 nonzero_msg(dd, i, "shared",
Jubin John17fb4f22016-02-14 20:21:52 -080010902 be16_to_cpu(new_bc->vl[i].shared));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010903 new_bc->vl[i].dedicated = 0;
10904 new_bc->vl[i].shared = 0;
10905 }
10906 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050010907
Mike Marciniszyn77241052015-07-30 15:17:43 -040010908 /* fetch the current values */
10909 get_buffer_control(dd, &cur_bc, &cur_total);
10910
10911 /*
10912 * Create the masks we will use.
10913 */
10914 memset(changing, 0, sizeof(changing));
10915 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
Jubin John4d114fd2016-02-14 20:21:43 -080010916 /*
10917 * NOTE: Assumes that the individual VL bits are adjacent and in
10918 * increasing order
10919 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010920 stat_mask =
10921 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10922 changing_mask = 0;
10923 ld_mask = 0;
10924 change_count = 0;
10925 any_shared_limit_changing = 0;
10926 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10927 if (!valid_vl(i))
10928 continue;
10929 this_shared_changing = new_bc->vl[i].shared
10930 != cur_bc.vl[i].shared;
10931 if (this_shared_changing)
10932 any_shared_limit_changing = 1;
Jubin Johnd0d236e2016-02-14 20:20:15 -080010933 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10934 this_shared_changing) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010935 changing[i] = 1;
10936 changing_mask |= stat_mask;
10937 change_count++;
10938 }
10939 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10940 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10941 lowering_dedicated[i] = 1;
10942 ld_mask |= stat_mask;
10943 }
10944 }
10945
10946 /* bracket the credit change with a total adjustment */
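	/*
	 * Raise the global total now if it is growing; if it is shrinking,
	 * the matching reduction is done after all per-VL limits are set.
	 */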
10947 if (new_total > cur_total)
10948 set_global_limit(dd, new_total);
10949
10950 /*
10951 * Start the credit change algorithm.
10952 */
10953 use_all_mask = 0;
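	/*
	 * Per the algorithm above: lowering the global shared limit (or, on
	 * A0, changing any shared limit) requires waiting on all VLs.
	 */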
10954 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010955 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10956 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010957 set_global_shared(dd, 0);
10958 cur_bc.overall_shared_limit = 0;
10959 use_all_mask = 1;
10960 }
10961
10962 for (i = 0; i < NUM_USABLE_VLS; i++) {
10963 if (!valid_vl(i))
10964 continue;
10965
10966 if (changing[i]) {
10967 set_vl_shared(dd, i, 0);
10968 cur_bc.vl[i].shared = 0;
10969 }
10970 }
10971
10972 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
Jubin John17fb4f22016-02-14 20:21:52 -080010973 "shared");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010974
10975 if (change_count > 0) {
10976 for (i = 0; i < NUM_USABLE_VLS; i++) {
10977 if (!valid_vl(i))
10978 continue;
10979
10980 if (lowering_dedicated[i]) {
10981 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080010982 be16_to_cpu(new_bc->
10983 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010984 cur_bc.vl[i].dedicated =
10985 new_bc->vl[i].dedicated;
10986 }
10987 }
10988
10989 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10990
10991 /* now raise all dedicated that are going up */
10992 for (i = 0; i < NUM_USABLE_VLS; i++) {
10993 if (!valid_vl(i))
10994 continue;
10995
10996 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10997 be16_to_cpu(cur_bc.vl[i].dedicated))
10998 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080010999 be16_to_cpu(new_bc->
11000 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011001 }
11002 }
11003
11004 /* next raise all shared that are going up */
11005 for (i = 0; i < NUM_USABLE_VLS; i++) {
11006 if (!valid_vl(i))
11007 continue;
11008
11009 if (be16_to_cpu(new_bc->vl[i].shared) >
11010 be16_to_cpu(cur_bc.vl[i].shared))
11011 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11012 }
11013
11014 /* finally raise the global shared */
11015 if (be16_to_cpu(new_bc->overall_shared_limit) >
Jubin John17fb4f22016-02-14 20:21:52 -080011016 be16_to_cpu(cur_bc.overall_shared_limit))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011017 set_global_shared(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011018 be16_to_cpu(new_bc->overall_shared_limit));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011019
11020 /* bracket the credit change with a total adjustment */
11021 if (new_total < cur_total)
11022 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011023
11024 /*
11025 * Determine the actual number of operational VLS using the number of
11026 * dedicated and shared credits for each VL.
11027 */
11028 if (change_count > 0) {
11029 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11030 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11031 be16_to_cpu(new_bc->vl[i].shared) > 0)
11032 vl_count++;
11033 ppd->actual_vls_operational = vl_count;
11034 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11035 ppd->actual_vls_operational :
11036 ppd->vls_operational,
11037 NULL);
11038 if (ret == 0)
11039 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11040 ppd->actual_vls_operational :
11041 ppd->vls_operational, NULL);
11042 if (ret)
11043 return ret;
11044 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011045 return 0;
11046}
11047
11048/*
11049 * Read the given fabric manager table. Return the size of the
11050 * table (in bytes) on success, and a negative error code on
11051 * failure.
11052 */
11053int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11054
11055{
11056 int size;
11057 struct vl_arb_cache *vlc;
11058
11059 switch (which) {
11060 case FM_TBL_VL_HIGH_ARB:
11061 size = 256;
11062 /*
11063 * OPA specifies 128 elements (of 2 bytes each), though
11064 * HFI supports only 16 elements in h/w.
11065 */
11066 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11067 vl_arb_get_cache(vlc, t);
11068 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11069 break;
11070 case FM_TBL_VL_LOW_ARB:
11071 size = 256;
11072 /*
11073 * OPA specifies 128 elements (of 2 bytes each), though
11074 * HFI supports only 16 elements in h/w.
11075 */
11076 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11077 vl_arb_get_cache(vlc, t);
11078 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11079 break;
11080 case FM_TBL_BUFFER_CONTROL:
11081 size = get_buffer_control(ppd->dd, t, NULL);
11082 break;
11083 case FM_TBL_SC2VLNT:
11084 size = get_sc2vlnt(ppd->dd, t);
11085 break;
11086 case FM_TBL_VL_PREEMPT_ELEMS:
11087 size = 256;
11088 /* OPA specifies 128 elements, of 2 bytes each */
11089 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11090 break;
11091 case FM_TBL_VL_PREEMPT_MATRIX:
11092 size = 256;
11093 /*
11094 * OPA specifies that this is the same size as the VL
11095 * arbitration tables (i.e., 256 bytes).
11096 */
11097 break;
11098 default:
11099 return -EINVAL;
11100 }
11101 return size;
11102}
11103
11104/*
11105 * Write the given fabric manager table.
11106 */
11107int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11108{
11109 int ret = 0;
11110 struct vl_arb_cache *vlc;
11111
11112 switch (which) {
11113 case FM_TBL_VL_HIGH_ARB:
11114 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11115 if (vl_arb_match_cache(vlc, t)) {
11116 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11117 break;
11118 }
11119 vl_arb_set_cache(vlc, t);
11120 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11121 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11122 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11123 break;
11124 case FM_TBL_VL_LOW_ARB:
11125 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11126 if (vl_arb_match_cache(vlc, t)) {
11127 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11128 break;
11129 }
11130 vl_arb_set_cache(vlc, t);
11131 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11132 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11133 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11134 break;
11135 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011136 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011137 break;
11138 case FM_TBL_SC2VLNT:
11139 set_sc2vlnt(ppd->dd, t);
11140 break;
11141 default:
11142 ret = -EINVAL;
11143 }
11144 return ret;
11145}
11146
11147/*
11148 * Disable all data VLs.
11149 *
11150 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11151 */
11152static int disable_data_vls(struct hfi1_devdata *dd)
11153{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011154 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011155 return 1;
11156
11157 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11158
11159 return 0;
11160}
11161
11162/*
11163 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11164 * Just re-enables all data VLs (the "fill" part happens
11165 * automatically - the name was chosen for symmetry with
11166 * stop_drain_data_vls()).
11167 *
11168 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11169 */
11170int open_fill_data_vls(struct hfi1_devdata *dd)
11171{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011172 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011173 return 1;
11174
11175 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11176
11177 return 0;
11178}
11179
11180/*
11181 * drain_data_vls() - assumes that disable_data_vls() has been called,
11182 * then waits for the occupancy of the per-VL FIFOs of all contexts, and
11183 * of the SDMA engines, to drop to 0.
11184 */
11185static void drain_data_vls(struct hfi1_devdata *dd)
11186{
11187 sc_wait(dd);
11188 sdma_wait(dd);
11189 pause_for_credit_return(dd);
11190}
11191
11192/*
11193 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11194 *
11195 * Use open_fill_data_vls() to resume using data VLs. This pair is
11196 * meant to be used like this:
11197 *
11198 * stop_drain_data_vls(dd);
11199 * // do things with per-VL resources
11200 * open_fill_data_vls(dd);
11201 */
11202int stop_drain_data_vls(struct hfi1_devdata *dd)
11203{
11204 int ret;
11205
11206 ret = disable_data_vls(dd);
11207 if (ret == 0)
11208 drain_data_vls(dd);
11209
11210 return ret;
11211}
11212
11213/*
11214 * Convert a nanosecond time to a cclock count. No matter how slow
11215 * the cclock, a non-zero ns will always have a non-zero result.
11216 */
11217u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11218{
11219 u32 cclocks;
11220
11221 if (dd->icode == ICODE_FPGA_EMULATION)
11222 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11223 else /* simulation pretends to be ASIC */
11224 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11225 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11226 cclocks = 1;
11227 return cclocks;
11228}
11229
11230/*
11231 * Convert a cclock count to nanoseconds. No matter how slow
11232 * the cclock, a non-zero cclocks will always have a non-zero result.
11233 */
11234u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11235{
11236 u32 ns;
11237
11238 if (dd->icode == ICODE_FPGA_EMULATION)
11239 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11240 else /* simulation pretends to be ASIC */
11241 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11242 if (cclocks && !ns)
11243 ns = 1;
11244 return ns;
11245}
11246
11247/*
11248 * Dynamically adjust the receive interrupt timeout for a context based on
11249 * incoming packet rate.
11250 *
11251 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11252 */
11253static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11254{
11255 struct hfi1_devdata *dd = rcd->dd;
11256 u32 timeout = rcd->rcvavail_timeout;
11257
11258 /*
11259	 * This algorithm doubles or halves the timeout depending on whether
11260	 * the number of packets received in this interrupt was less than or
11261	 * greater than or equal to the interrupt count.
11262 *
11263 * The calculations below do not allow a steady state to be achieved.
11264	 * Only at the endpoints is it possible to have an unchanging
11265 * timeout.
11266 */
11267 if (npkts < rcv_intr_count) {
11268 /*
11269 * Not enough packets arrived before the timeout, adjust
11270 * timeout downward.
11271 */
11272 if (timeout < 2) /* already at minimum? */
11273 return;
11274 timeout >>= 1;
11275 } else {
11276 /*
11277 * More than enough packets arrived before the timeout, adjust
11278 * timeout upward.
11279 */
11280 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11281 return;
11282 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11283 }
11284
11285 rcd->rcvavail_timeout = timeout;
Jubin John4d114fd2016-02-14 20:21:43 -080011286 /*
11287 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11288 * been verified to be in range
11289 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011290 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011291 (u64)timeout <<
11292 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011293}
11294
11295void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11296 u32 intr_adjust, u32 npkts)
11297{
11298 struct hfi1_devdata *dd = rcd->dd;
11299 u64 reg;
11300 u32 ctxt = rcd->ctxt;
11301
11302 /*
11303 * Need to write timeout register before updating RcvHdrHead to ensure
11304 * that a new value is used when the HW decides to restart counting.
11305 */
11306 if (intr_adjust)
11307 adjust_rcv_timeout(rcd, npkts);
11308 if (updegr) {
11309 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11310 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11311 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11312 }
11313 mmiowb();
11314 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11315 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11316 << RCV_HDR_HEAD_HEAD_SHIFT);
11317 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11318 mmiowb();
11319}
11320
11321u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11322{
11323 u32 head, tail;
11324
11325 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11326 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11327
11328 if (rcd->rcvhdrtail_kvaddr)
11329 tail = get_rcvhdrtail(rcd);
11330 else
11331 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11332
11333 return head == tail;
11334}
11335
11336/*
11337 * Context Control and Receive Array encoding for buffer size:
11338 * 0x0 invalid
11339 * 0x1 4 KB
11340 * 0x2 8 KB
11341 * 0x3 16 KB
11342 * 0x4 32 KB
11343 * 0x5 64 KB
11344 * 0x6 128 KB
11345 * 0x7 256 KB
11346 * 0x8 512 KB (Receive Array only)
11347 * 0x9 1 MB (Receive Array only)
11348 * 0xa 2 MB (Receive Array only)
11349 *
11350 * 0xB-0xF - reserved (Receive Array only)
11351 *
11352 *
11353 * This routine assumes that the value has already been sanity checked.
11354 */
11355static u32 encoded_size(u32 size)
11356{
11357 switch (size) {
Jubin John8638b772016-02-14 20:19:24 -080011358 case 4 * 1024: return 0x1;
11359 case 8 * 1024: return 0x2;
11360 case 16 * 1024: return 0x3;
11361 case 32 * 1024: return 0x4;
11362 case 64 * 1024: return 0x5;
11363 case 128 * 1024: return 0x6;
11364 case 256 * 1024: return 0x7;
11365 case 512 * 1024: return 0x8;
11366 case 1 * 1024 * 1024: return 0x9;
11367 case 2 * 1024 * 1024: return 0xa;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011368 }
11369 return 0x1; /* if invalid, go with the minimum size */
11370}
11371
11372void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11373{
11374 struct hfi1_ctxtdata *rcd;
11375 u64 rcvctrl, reg;
11376 int did_enable = 0;
11377
11378 rcd = dd->rcd[ctxt];
11379 if (!rcd)
11380 return;
11381
11382 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11383
11384 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11385 /* if the context already enabled, don't do the extra steps */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011386 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11387 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011388 /* reset the tail and hdr addresses, and sequence count */
11389 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11390 rcd->rcvhdrq_phys);
11391 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11392 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11393 rcd->rcvhdrqtailaddr_phys);
11394 rcd->seq_cnt = 1;
11395
11396 /* reset the cached receive header queue head value */
11397 rcd->head = 0;
11398
11399 /*
11400 * Zero the receive header queue so we don't get false
11401 * positives when checking the sequence number. The
11402 * sequence numbers could land exactly on the same spot.
11403		 * E.g. an rcd restart before the receive header queue wrapped.
11404 */
11405 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11406
11407 /* starting timeout */
11408 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11409
11410 /* enable the context */
11411 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11412
11413 /* clean the egr buffer size first */
11414 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11415 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11416 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11417 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11418
11419 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11420 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11421 did_enable = 1;
11422
11423 /* zero RcvEgrIndexHead */
11424 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11425
11426 /* set eager count and base index */
11427 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11428 & RCV_EGR_CTRL_EGR_CNT_MASK)
11429 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11430 (((rcd->eager_base >> RCV_SHIFT)
11431 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11432 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11433 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11434
11435 /*
11436 * Set TID (expected) count and base index.
11437 * rcd->expected_count is set to individual RcvArray entries,
11438 * not pairs, and the CSR takes a pair-count in groups of
11439 * four, so divide by 8.
11440 */
11441 reg = (((rcd->expected_count >> RCV_SHIFT)
11442 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11443 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11444 (((rcd->expected_base >> RCV_SHIFT)
11445 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11446 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11447 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011448 if (ctxt == HFI1_CTRL_CTXT)
11449 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011450 }
11451 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11452 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011453 /*
11454 * When receive context is being disabled turn on tail
11455 * update with a dummy tail address and then disable
11456 * receive context.
11457 */
11458 if (dd->rcvhdrtail_dummy_physaddr) {
11459 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11460 dd->rcvhdrtail_dummy_physaddr);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011461 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011462 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11463 }
11464
Mike Marciniszyn77241052015-07-30 15:17:43 -040011465 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11466 }
11467 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11468 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11469 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11470 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11471 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11472 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011473 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11474 /* See comment on RcvCtxtCtrl.TailUpd above */
11475 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11476 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11477 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011478 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11479 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11480 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11481 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11482 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011483 /*
11484 * In one-packet-per-eager mode, the size comes from
11485 * the RcvArray entry.
11486 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011487 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11488 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11489 }
11490 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11491 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11492 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11493 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11494 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11495 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11496 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11497 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11498 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11499 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11500 rcd->rcvctrl = rcvctrl;
11501 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11502 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11503
11504 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011505 if (did_enable &&
11506 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011507 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11508 if (reg != 0) {
11509 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011510 ctxt, reg);
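			/*
			 * Nudge RcvHdrHead (write 0x10, then back to 0) to
			 * clear the sticky blocked status, then re-read the
			 * status to confirm.
			 */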
Mike Marciniszyn77241052015-07-30 15:17:43 -040011511 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11512 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11513 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11514 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11515 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11516 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011517 ctxt, reg, reg == 0 ? "not" : "still");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011518 }
11519 }
11520
11521 if (did_enable) {
11522 /*
11523 * The interrupt timeout and count must be set after
11524 * the context is enabled to take effect.
11525 */
11526 /* set interrupt timeout */
11527 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011528 (u64)rcd->rcvavail_timeout <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040011529 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11530
11531 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11532 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11533 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11534 }
11535
11536 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11537 /*
11538 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011539		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11540		 * so it doesn't contain an invalid address.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011541 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011542 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11543 dd->rcvhdrtail_dummy_physaddr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011544}
11545
Dean Luick582e05c2016-02-18 11:13:01 -080011546u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011547{
11548 int ret;
11549 u64 val = 0;
11550
11551 if (namep) {
11552 ret = dd->cntrnameslen;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011553 *namep = dd->cntrnames;
11554 } else {
11555 const struct cntr_entry *entry;
11556 int i, j;
11557
11558 ret = (dd->ndevcntrs) * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011559
11560 /* Get the start of the block of counters */
11561 *cntrp = dd->cntrs;
11562
11563 /*
11564 * Now go and fill in each counter in the block.
11565 */
11566 for (i = 0; i < DEV_CNTR_LAST; i++) {
11567 entry = &dev_cntrs[i];
11568 hfi1_cdbg(CNTR, "reading %s", entry->name);
11569 if (entry->flags & CNTR_DISABLED) {
11570 /* Nothing */
11571 hfi1_cdbg(CNTR, "\tDisabled\n");
11572 } else {
11573 if (entry->flags & CNTR_VL) {
11574 hfi1_cdbg(CNTR, "\tPer VL\n");
11575 for (j = 0; j < C_VL_COUNT; j++) {
11576 val = entry->rw_cntr(entry,
11577 dd, j,
11578 CNTR_MODE_R,
11579 0);
11580 hfi1_cdbg(
11581 CNTR,
11582 "\t\tRead 0x%llx for %d\n",
11583 val, j);
11584 dd->cntrs[entry->offset + j] =
11585 val;
11586 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011587 } else if (entry->flags & CNTR_SDMA) {
11588 hfi1_cdbg(CNTR,
11589 "\t Per SDMA Engine\n");
11590 for (j = 0; j < dd->chip_sdma_engines;
11591 j++) {
11592 val =
11593 entry->rw_cntr(entry, dd, j,
11594 CNTR_MODE_R, 0);
11595 hfi1_cdbg(CNTR,
11596 "\t\tRead 0x%llx for %d\n",
11597 val, j);
11598 dd->cntrs[entry->offset + j] =
11599 val;
11600 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011601 } else {
11602 val = entry->rw_cntr(entry, dd,
11603 CNTR_INVALID_VL,
11604 CNTR_MODE_R, 0);
11605 dd->cntrs[entry->offset] = val;
11606 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11607 }
11608 }
11609 }
11610 }
11611 return ret;
11612}
11613
11614/*
11615 * Used by sysfs to create files for hfi stats to read
11616 */
Dean Luick582e05c2016-02-18 11:13:01 -080011617u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011618{
11619 int ret;
11620 u64 val = 0;
11621
11622 if (namep) {
Dean Luick582e05c2016-02-18 11:13:01 -080011623 ret = ppd->dd->portcntrnameslen;
11624 *namep = ppd->dd->portcntrnames;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011625 } else {
11626 const struct cntr_entry *entry;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011627 int i, j;
11628
Dean Luick582e05c2016-02-18 11:13:01 -080011629 ret = ppd->dd->nportcntrs * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011630 *cntrp = ppd->cntrs;
11631
11632 for (i = 0; i < PORT_CNTR_LAST; i++) {
11633 entry = &port_cntrs[i];
11634 hfi1_cdbg(CNTR, "reading %s", entry->name);
11635 if (entry->flags & CNTR_DISABLED) {
11636 /* Nothing */
11637 hfi1_cdbg(CNTR, "\tDisabled\n");
11638 continue;
11639 }
11640
11641 if (entry->flags & CNTR_VL) {
11642 hfi1_cdbg(CNTR, "\tPer VL");
11643 for (j = 0; j < C_VL_COUNT; j++) {
11644 val = entry->rw_cntr(entry, ppd, j,
11645 CNTR_MODE_R,
11646 0);
11647 hfi1_cdbg(
11648 CNTR,
11649 "\t\tRead 0x%llx for %d",
11650 val, j);
11651 ppd->cntrs[entry->offset + j] = val;
11652 }
11653 } else {
11654 val = entry->rw_cntr(entry, ppd,
11655 CNTR_INVALID_VL,
11656 CNTR_MODE_R,
11657 0);
11658 ppd->cntrs[entry->offset] = val;
11659 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11660 }
11661 }
11662 }
11663 return ret;
11664}
11665
11666static void free_cntrs(struct hfi1_devdata *dd)
11667{
11668 struct hfi1_pportdata *ppd;
11669 int i;
11670
11671 if (dd->synth_stats_timer.data)
11672 del_timer_sync(&dd->synth_stats_timer);
11673 dd->synth_stats_timer.data = 0;
11674 ppd = (struct hfi1_pportdata *)(dd + 1);
11675 for (i = 0; i < dd->num_pports; i++, ppd++) {
11676 kfree(ppd->cntrs);
11677 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011678 free_percpu(ppd->ibport_data.rvp.rc_acks);
11679 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11680 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011681 ppd->cntrs = NULL;
11682 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011683 ppd->ibport_data.rvp.rc_acks = NULL;
11684 ppd->ibport_data.rvp.rc_qacks = NULL;
11685 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011686 }
11687 kfree(dd->portcntrnames);
11688 dd->portcntrnames = NULL;
11689 kfree(dd->cntrs);
11690 dd->cntrs = NULL;
11691 kfree(dd->scntrs);
11692 dd->scntrs = NULL;
11693 kfree(dd->cntrnames);
11694 dd->cntrnames = NULL;
11695}
11696
Mike Marciniszyn77241052015-07-30 15:17:43 -040011697static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11698 u64 *psval, void *context, int vl)
11699{
11700 u64 val;
11701 u64 sval = *psval;
11702
11703 if (entry->flags & CNTR_DISABLED) {
11704 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11705 return 0;
11706 }
11707
11708 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11709
11710 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11711
11712	/* If it's a synthetic counter there is more work we need to do */
11713 if (entry->flags & CNTR_SYNTH) {
11714 if (sval == CNTR_MAX) {
11715 /* No need to read already saturated */
11716 return CNTR_MAX;
11717 }
11718
11719 if (entry->flags & CNTR_32BIT) {
11720 /* 32bit counters can wrap multiple times */
11721 u64 upper = sval >> 32;
11722 u64 lower = (sval << 32) >> 32;
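			/*
			 * upper: number of times the 32-bit hw counter has
			 * wrapped; lower: last raw hw value folded into sval.
			 */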
11723
11724 if (lower > val) { /* hw wrapped */
11725 if (upper == CNTR_32BIT_MAX)
11726 val = CNTR_MAX;
11727 else
11728 upper++;
11729 }
11730
11731 if (val != CNTR_MAX)
11732 val = (upper << 32) | val;
11733
11734 } else {
11735 /* If we rolled we are saturated */
11736 if ((val < sval) || (val > CNTR_MAX))
11737 val = CNTR_MAX;
11738 }
11739 }
11740
11741 *psval = val;
11742
11743 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11744
11745 return val;
11746}
11747
11748static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11749 struct cntr_entry *entry,
11750 u64 *psval, void *context, int vl, u64 data)
11751{
11752 u64 val;
11753
11754 if (entry->flags & CNTR_DISABLED) {
11755 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11756 return 0;
11757 }
11758
11759 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11760
11761 if (entry->flags & CNTR_SYNTH) {
11762 *psval = data;
11763 if (entry->flags & CNTR_32BIT) {
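			/*
			 * Only the low 32 bits are written to 32-bit hw
			 * counters; the full value is kept in software.
			 */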
11764 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11765 (data << 32) >> 32);
11766 val = data; /* return the full 64bit value */
11767 } else {
11768 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11769 data);
11770 }
11771 } else {
11772 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11773 }
11774
11775 *psval = val;
11776
11777 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11778
11779 return val;
11780}
11781
11782u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11783{
11784 struct cntr_entry *entry;
11785 u64 *sval;
11786
11787 entry = &dev_cntrs[index];
11788 sval = dd->scntrs + entry->offset;
11789
11790 if (vl != CNTR_INVALID_VL)
11791 sval += vl;
11792
11793 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11794}
11795
11796u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11797{
11798 struct cntr_entry *entry;
11799 u64 *sval;
11800
11801 entry = &dev_cntrs[index];
11802 sval = dd->scntrs + entry->offset;
11803
11804 if (vl != CNTR_INVALID_VL)
11805 sval += vl;
11806
11807 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11808}
11809
11810u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11811{
11812 struct cntr_entry *entry;
11813 u64 *sval;
11814
11815 entry = &port_cntrs[index];
11816 sval = ppd->scntrs + entry->offset;
11817
11818 if (vl != CNTR_INVALID_VL)
11819 sval += vl;
11820
11821 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11822 (index <= C_RCV_HDR_OVF_LAST)) {
11823 /* We do not want to bother for disabled contexts */
11824 return 0;
11825 }
11826
11827 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11828}
11829
11830u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11831{
11832 struct cntr_entry *entry;
11833 u64 *sval;
11834
11835 entry = &port_cntrs[index];
11836 sval = ppd->scntrs + entry->offset;
11837
11838 if (vl != CNTR_INVALID_VL)
11839 sval += vl;
11840
11841 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11842 (index <= C_RCV_HDR_OVF_LAST)) {
11843 /* We do not want to bother for disabled contexts */
11844 return 0;
11845 }
11846
11847 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11848}
11849
11850static void update_synth_timer(unsigned long opaque)
11851{
11852 u64 cur_tx;
11853 u64 cur_rx;
11854 u64 total_flits;
11855 u8 update = 0;
11856 int i, j, vl;
11857 struct hfi1_pportdata *ppd;
11858 struct cntr_entry *entry;
11859
11860 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11861
11862 /*
11863	 * Rather than keep beating on the CSRs, pick a minimal set that we can
11864	 * check to watch for potential roll over. We can do this by looking at
11865	 * the number of flits sent/received. If the total flits exceed 32 bits,
11866	 * then we have to iterate all the counters and update.
11867 */
11868 entry = &dev_cntrs[C_DC_RCV_FLITS];
11869 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11870
11871 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11872 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11873
11874 hfi1_cdbg(
11875 CNTR,
11876 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11877 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11878
11879 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11880 /*
11881 * May not be strictly necessary to update but it won't hurt and
11882 * simplifies the logic here.
11883 */
11884 update = 1;
11885 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11886 dd->unit);
11887 } else {
11888 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11889 hfi1_cdbg(CNTR,
11890 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11891 total_flits, (u64)CNTR_32BIT_MAX);
11892 if (total_flits >= CNTR_32BIT_MAX) {
11893 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11894 dd->unit);
11895 update = 1;
11896 }
11897 }
11898
11899 if (update) {
11900 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11901 for (i = 0; i < DEV_CNTR_LAST; i++) {
11902 entry = &dev_cntrs[i];
11903 if (entry->flags & CNTR_VL) {
11904 for (vl = 0; vl < C_VL_COUNT; vl++)
11905 read_dev_cntr(dd, i, vl);
11906 } else {
11907 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11908 }
11909 }
11910 ppd = (struct hfi1_pportdata *)(dd + 1);
11911 for (i = 0; i < dd->num_pports; i++, ppd++) {
11912 for (j = 0; j < PORT_CNTR_LAST; j++) {
11913 entry = &port_cntrs[j];
11914 if (entry->flags & CNTR_VL) {
11915 for (vl = 0; vl < C_VL_COUNT; vl++)
11916 read_port_cntr(ppd, j, vl);
11917 } else {
11918 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11919 }
11920 }
11921 }
11922
11923 /*
11924 * We want the value in the register. The goal is to keep track
11925 * of the number of "ticks" not the counter value. In other
11926 * words if the register rolls we want to notice it and go ahead
11927 * and force an update.
11928 */
11929 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11930 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11931 CNTR_MODE_R, 0);
11932
11933 entry = &dev_cntrs[C_DC_RCV_FLITS];
11934 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11935 CNTR_MODE_R, 0);
11936
11937 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11938 dd->unit, dd->last_tx, dd->last_rx);
11939
11940 } else {
11941 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11942 }
11943
Bart Van Assche48a0cc132016-06-03 12:09:56 -070011944 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011945}
11946
11947#define C_MAX_NAME 13 /* 12 chars + one for \0 */
11948static int init_cntrs(struct hfi1_devdata *dd)
11949{
Dean Luickc024c552016-01-11 18:30:57 -050011950 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011951 size_t sz;
11952 char *p;
11953 char name[C_MAX_NAME];
11954 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011955 const char *bit_type_32 = ",32";
11956 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011957
11958 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011959 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11960 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011961
11962 /***********************/
11963 /* per device counters */
11964 /***********************/
11965
11966	/* size names and determine how many we have */
11967 dd->ndevcntrs = 0;
11968 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011969
11970 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011971 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11972 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11973 continue;
11974 }
11975
11976 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050011977 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011978 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011979 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080011980 dev_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011981 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011982 /* Add ",32" for 32-bit counters */
11983 if (dev_cntrs[i].flags & CNTR_32BIT)
11984 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011985 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011986 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011987 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011988 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050011989 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011990 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011991 snprintf(name, C_MAX_NAME, "%s%d",
11992 dev_cntrs[i].name, j);
11993 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011994 /* Add ",32" for 32-bit counters */
11995 if (dev_cntrs[i].flags & CNTR_32BIT)
11996 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011997 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011998 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011999 }
12000 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012001 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012002 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012003 /* Add ",32" for 32-bit counters */
12004 if (dev_cntrs[i].flags & CNTR_32BIT)
12005 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050012006 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012007 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012008 }
12009 }
12010
12011 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050012012 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012013 if (!dd->cntrs)
12014 goto bail;
12015
Dean Luickc024c552016-01-11 18:30:57 -050012016 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012017 if (!dd->scntrs)
12018 goto bail;
12019
Mike Marciniszyn77241052015-07-30 15:17:43 -040012020 /* allocate space for the counter names */
12021 dd->cntrnameslen = sz;
12022 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12023 if (!dd->cntrnames)
12024 goto bail;
12025
12026 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050012027 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012028 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12029 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012030 } else if (dev_cntrs[i].flags & CNTR_VL) {
12031 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012032 snprintf(name, C_MAX_NAME, "%s%d",
12033 dev_cntrs[i].name,
12034 vl_from_idx(j));
12035 memcpy(p, name, strlen(name));
12036 p += strlen(name);
12037
12038 /* Counter is 32 bits */
12039 if (dev_cntrs[i].flags & CNTR_32BIT) {
12040 memcpy(p, bit_type_32, bit_type_32_sz);
12041 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012042 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012043
Mike Marciniszyn77241052015-07-30 15:17:43 -040012044 *p++ = '\n';
12045 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012046 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12047 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012048 snprintf(name, C_MAX_NAME, "%s%d",
12049 dev_cntrs[i].name, j);
12050 memcpy(p, name, strlen(name));
12051 p += strlen(name);
12052
12053 /* Counter is 32 bits */
12054 if (dev_cntrs[i].flags & CNTR_32BIT) {
12055 memcpy(p, bit_type_32, bit_type_32_sz);
12056 p += bit_type_32_sz;
12057 }
12058
12059 *p++ = '\n';
12060 }
12061 } else {
12062 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12063 p += strlen(dev_cntrs[i].name);
12064
12065 /* Counter is 32 bits */
12066 if (dev_cntrs[i].flags & CNTR_32BIT) {
12067 memcpy(p, bit_type_32, bit_type_32_sz);
12068 p += bit_type_32_sz;
12069 }
12070
12071 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040012072 }
12073 }
12074
12075 /*********************/
12076 /* per port counters */
12077 /*********************/
12078
12079 /*
12080 * Go through the counters for the overflows and disable the ones we
12081 * don't need. This varies based on platform so we need to do it
12082 * dynamically here.
12083 */
12084 rcv_ctxts = dd->num_rcv_contexts;
12085 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12086 i <= C_RCV_HDR_OVF_LAST; i++) {
12087 port_cntrs[i].flags |= CNTR_DISABLED;
12088 }
12089
12090 /* size port counter names and determine how many we have*/
12091 sz = 0;
12092 dd->nportcntrs = 0;
12093 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012094 if (port_cntrs[i].flags & CNTR_DISABLED) {
12095 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12096 continue;
12097 }
12098
12099 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012100 port_cntrs[i].offset = dd->nportcntrs;
12101 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012102 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012103 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012104 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012105 /* Add ",32" for 32-bit counters */
12106 if (port_cntrs[i].flags & CNTR_32BIT)
12107 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012108 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012109 dd->nportcntrs++;
12110 }
12111 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012112 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012113 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012114 /* Add ",32" for 32-bit counters */
12115 if (port_cntrs[i].flags & CNTR_32BIT)
12116 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012117 port_cntrs[i].offset = dd->nportcntrs;
12118 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012119 }
12120 }
12121
12122 /* allocate space for the counter names */
12123 dd->portcntrnameslen = sz;
12124 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12125 if (!dd->portcntrnames)
12126 goto bail;
12127
12128 /* fill in port cntr names */
12129 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12130 if (port_cntrs[i].flags & CNTR_DISABLED)
12131 continue;
12132
12133 if (port_cntrs[i].flags & CNTR_VL) {
12134 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012135 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012136 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012137 memcpy(p, name, strlen(name));
12138 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012139
12140 /* Counter is 32 bits */
12141 if (port_cntrs[i].flags & CNTR_32BIT) {
12142 memcpy(p, bit_type_32, bit_type_32_sz);
12143 p += bit_type_32_sz;
12144 }
12145
Mike Marciniszyn77241052015-07-30 15:17:43 -040012146 *p++ = '\n';
12147 }
12148 } else {
12149 memcpy(p, port_cntrs[i].name,
12150 strlen(port_cntrs[i].name));
12151 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012152
12153 /* Counter is 32 bits */
12154 if (port_cntrs[i].flags & CNTR_32BIT) {
12155 memcpy(p, bit_type_32, bit_type_32_sz);
12156 p += bit_type_32_sz;
12157 }
12158
Mike Marciniszyn77241052015-07-30 15:17:43 -040012159 *p++ = '\n';
12160 }
12161 }
12162
12163 /* allocate per port storage for counter values */
12164 ppd = (struct hfi1_pportdata *)(dd + 1);
12165 for (i = 0; i < dd->num_pports; i++, ppd++) {
12166 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12167 if (!ppd->cntrs)
12168 goto bail;
12169
12170 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12171 if (!ppd->scntrs)
12172 goto bail;
12173 }
12174
12175 /* CPU counters need to be allocated and zeroed */
12176 if (init_cpu_counters(dd))
12177 goto bail;
12178
12179 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12180 return 0;
12181bail:
12182 free_cntrs(dd);
12183 return -ENOMEM;
12184}
12185
Mike Marciniszyn77241052015-07-30 15:17:43 -040012186static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12187{
12188 switch (chip_lstate) {
12189 default:
12190 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012191 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12192 chip_lstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012193 /* fall through */
12194 case LSTATE_DOWN:
12195 return IB_PORT_DOWN;
12196 case LSTATE_INIT:
12197 return IB_PORT_INIT;
12198 case LSTATE_ARMED:
12199 return IB_PORT_ARMED;
12200 case LSTATE_ACTIVE:
12201 return IB_PORT_ACTIVE;
12202 }
12203}
12204
12205u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12206{
12207 /* look at the HFI meta-states only */
12208 switch (chip_pstate & 0xf0) {
12209 default:
12210 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012211 chip_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012212 /* fall through */
12213 case PLS_DISABLED:
12214 return IB_PORTPHYSSTATE_DISABLED;
12215 case PLS_OFFLINE:
12216 return OPA_PORTPHYSSTATE_OFFLINE;
12217 case PLS_POLLING:
12218 return IB_PORTPHYSSTATE_POLLING;
12219 case PLS_CONFIGPHY:
12220 return IB_PORTPHYSSTATE_TRAINING;
12221 case PLS_LINKUP:
12222 return IB_PORTPHYSSTATE_LINKUP;
12223 case PLS_PHYTEST:
12224 return IB_PORTPHYSSTATE_PHY_TEST;
12225 }
12226}
12227
12228/* return the OPA port logical state name */
12229const char *opa_lstate_name(u32 lstate)
12230{
12231 static const char * const port_logical_names[] = {
12232 "PORT_NOP",
12233 "PORT_DOWN",
12234 "PORT_INIT",
12235 "PORT_ARMED",
12236 "PORT_ACTIVE",
12237 "PORT_ACTIVE_DEFER",
12238 };
12239 if (lstate < ARRAY_SIZE(port_logical_names))
12240 return port_logical_names[lstate];
12241 return "unknown";
12242}
12243
12244/* return the OPA port physical state name */
12245const char *opa_pstate_name(u32 pstate)
12246{
12247 static const char * const port_physical_names[] = {
12248 "PHYS_NOP",
12249 "reserved1",
12250 "PHYS_POLL",
12251 "PHYS_DISABLED",
12252 "PHYS_TRAINING",
12253 "PHYS_LINKUP",
12254 "PHYS_LINK_ERR_RECOVER",
12255 "PHYS_PHY_TEST",
12256 "reserved8",
12257 "PHYS_OFFLINE",
12258 "PHYS_GANGED",
12259 "PHYS_TEST",
12260 };
12261 if (pstate < ARRAY_SIZE(port_physical_names))
12262 return port_physical_names[pstate];
12263 return "unknown";
12264}
12265
12266/*
12267 * Read the hardware link state and set the driver's cached value of it.
12268 * Return the (new) current value.
12269 */
12270u32 get_logical_state(struct hfi1_pportdata *ppd)
12271{
12272 u32 new_state;
12273
12274 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12275 if (new_state != ppd->lstate) {
12276 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012277 opa_lstate_name(new_state), new_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012278 ppd->lstate = new_state;
12279 }
12280 /*
12281 * Set port status flags in the page mapped into userspace
12282 * memory. Do it here to ensure a reliable state - this is
12283 * the only function called by all state handling code.
 12284	 * Always set the flags because the cached value
 12285	 * might have been changed explicitly outside of this
 12286	 * function.
12287 */
12288 if (ppd->statusp) {
12289 switch (ppd->lstate) {
12290 case IB_PORT_DOWN:
12291 case IB_PORT_INIT:
12292 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12293 HFI1_STATUS_IB_READY);
12294 break;
12295 case IB_PORT_ARMED:
12296 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12297 break;
12298 case IB_PORT_ACTIVE:
12299 *ppd->statusp |= HFI1_STATUS_IB_READY;
12300 break;
12301 }
12302 }
12303 return ppd->lstate;
12304}
12305
12306/**
12307 * wait_logical_linkstate - wait for an IB link state change to occur
12308 * @ppd: port device
12309 * @state: the state to wait for
12310 * @msecs: the number of milliseconds to wait
12311 *
12312 * Wait up to msecs milliseconds for IB link state change to occur.
12313 * For now, take the easy polling route.
12314 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12315 */
12316static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12317 int msecs)
12318{
12319 unsigned long timeout;
12320
12321 timeout = jiffies + msecs_to_jiffies(msecs);
12322 while (1) {
12323 if (get_logical_state(ppd) == state)
12324 return 0;
12325 if (time_after(jiffies, timeout))
12326 break;
12327 msleep(20);
12328 }
12329 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12330
12331 return -ETIMEDOUT;
12332}
12333
12334u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12335{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012336 u32 pstate;
12337 u32 ib_pstate;
12338
12339 pstate = read_physical_state(ppd->dd);
12340 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012341 if (ppd->last_pstate != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012342 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012343 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12344 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12345 pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012346 ppd->last_pstate = ib_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012347 }
12348 return ib_pstate;
12349}
12350
12351/*
12352 * Read/modify/write ASIC_QSFP register bits as selected by mask
12353 * data: 0 or 1 in the positions depending on what needs to be written
12354 * dir: 0 for read, 1 for write
12355 * mask: select by setting
12356 * I2CCLK (bit 0)
12357 * I2CDATA (bit 1)
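 *
 * In other words: a mask of 0 makes this a pure read of the
 * ASIC_QSFPn_IN pins; a non-zero mask first updates the output-enable
 * bits selected by mask to the values in dir, then performs the read.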
12358 */
12359u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12360 u32 mask)
12361{
12362 u64 qsfp_oe, target_oe;
12363
12364 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12365 if (mask) {
12366 /* We are writing register bits, so lock access */
12367 dir &= mask;
12368 data &= mask;
12369
12370 qsfp_oe = read_csr(dd, target_oe);
12371 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12372 write_csr(dd, target_oe, qsfp_oe);
12373 }
12374 /* We are exclusively reading bits here, but it is unlikely
12375 * we'll get valid data when we set the direction of the pin
 12376	 * in the same call, so the caller should invoke this function again
 12377	 * to get valid data.
12378 */
12379 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12380}
12381
12382#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12383(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12384
12385#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12386(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12387
12388int hfi1_init_ctxt(struct send_context *sc)
12389{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012390 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012391 struct hfi1_devdata *dd = sc->dd;
12392 u64 reg;
12393 u8 set = (sc->type == SC_USER ?
12394 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12395 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12396 reg = read_kctxt_csr(dd, sc->hw_context,
12397 SEND_CTXT_CHECK_ENABLE);
12398 if (set)
12399 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12400 else
12401 SET_STATIC_RATE_CONTROL_SMASK(reg);
12402 write_kctxt_csr(dd, sc->hw_context,
12403 SEND_CTXT_CHECK_ENABLE, reg);
12404 }
12405 return 0;
12406}
12407
12408int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12409{
12410 int ret = 0;
12411 u64 reg;
12412
12413 if (dd->icode != ICODE_RTL_SILICON) {
12414 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12415 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12416 __func__);
12417 return -EINVAL;
12418 }
12419 reg = read_csr(dd, ASIC_STS_THERM);
12420 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12421 ASIC_STS_THERM_CURR_TEMP_MASK);
12422 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12423 ASIC_STS_THERM_LO_TEMP_MASK);
12424 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12425 ASIC_STS_THERM_HI_TEMP_MASK);
12426 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12427 ASIC_STS_THERM_CRIT_TEMP_MASK);
12428 /* triggers is a 3-bit value - 1 bit per trigger. */
12429 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12430
12431 return ret;
12432}
12433
12434/* ========================================================================= */
12435
12436/*
12437 * Enable/disable chip from delivering interrupts.
12438 */
12439void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12440{
12441 int i;
12442
12443 /*
12444 * In HFI, the mask needs to be 1 to allow interrupts.
12445 */
12446 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012447 /* enable all interrupts */
12448 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012449 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012450
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012451 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012452 } else {
12453 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012454 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012455 }
12456}
12457
12458/*
12459 * Clear all interrupt sources on the chip.
12460 */
12461static void clear_all_interrupts(struct hfi1_devdata *dd)
12462{
12463 int i;
12464
12465 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012466 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012467
12468 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12469 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12470 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12471 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12472 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12473 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12474 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12475 for (i = 0; i < dd->chip_send_contexts; i++)
12476 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12477 for (i = 0; i < dd->chip_sdma_engines; i++)
12478 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12479
12480 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12481 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12482 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12483}
12484
12485/* Move to pcie.c? */
12486static void disable_intx(struct pci_dev *pdev)
12487{
12488 pci_intx(pdev, 0);
12489}
12490
12491static void clean_up_interrupts(struct hfi1_devdata *dd)
12492{
12493 int i;
12494
12495 /* remove irqs - must happen before disabling/turning off */
12496 if (dd->num_msix_entries) {
12497 /* MSI-X */
12498 struct hfi1_msix_entry *me = dd->msix_entries;
12499
12500 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012501 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012502 continue;
12503 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012504 free_irq(me->msix.vector, me->arg);
12505 }
12506 } else {
12507 /* INTx */
12508 if (dd->requested_intx_irq) {
12509 free_irq(dd->pcidev->irq, dd);
12510 dd->requested_intx_irq = 0;
12511 }
12512 }
12513
12514 /* turn off interrupts */
12515 if (dd->num_msix_entries) {
12516 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012517 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012518 } else {
12519 /* INTx */
12520 disable_intx(dd->pcidev);
12521 }
12522
12523 /* clean structures */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012524 kfree(dd->msix_entries);
12525 dd->msix_entries = NULL;
12526 dd->num_msix_entries = 0;
12527}
12528
12529/*
12530 * Remap the interrupt source from the general handler to the given MSI-X
12531 * interrupt.
12532 */
12533static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12534{
12535 u64 reg;
12536 int m, n;
12537
12538 /* clear from the handled mask of the general interrupt */
12539 m = isrc / 64;
12540 n = isrc % 64;
12541 dd->gi_mask[m] &= ~((u64)1 << n);
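	/*
	 * Worked example: for isrc = 200, the source is bit n = 200 % 64 = 8
	 * of general-handler mask CSR m = 200 / 64 = 3; below, its routing
	 * entry is byte 200 % 8 = 0 of CCE_INT_MAP CSR 200 / 8 = 25.
	 */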
12542
12543 /* direct the chip source to the given MSI-X interrupt */
12544 m = isrc / 8;
12545 n = isrc % 8;
Jubin John8638b772016-02-14 20:19:24 -080012546 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12547 reg &= ~((u64)0xff << (8 * n));
12548 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12549 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012550}
12551
12552static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12553 int engine, int msix_intr)
12554{
12555 /*
 12556	 * SDMA engine interrupt sources are grouped by type, rather than by
 12557	 * engine. Per-engine interrupts are as follows:
12558 * SDMA
12559 * SDMAProgress
12560 * SDMAIdle
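	 *
	 * Illustration (engine count assumed, not from this listing): with
	 * 16 engines, engine 3 owns sources IS_SDMA_START + 3 (SDMA),
	 * IS_SDMA_START + 19 (SDMAProgress) and IS_SDMA_START + 35 (SDMAIdle).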
12561 */
Jubin John8638b772016-02-14 20:19:24 -080012562 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012563 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012564 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012565 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012566 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012567 msix_intr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012568}
12569
Mike Marciniszyn77241052015-07-30 15:17:43 -040012570static int request_intx_irq(struct hfi1_devdata *dd)
12571{
12572 int ret;
12573
Jubin John98050712015-11-16 21:59:27 -050012574 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12575 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012576 ret = request_irq(dd->pcidev->irq, general_interrupt,
Jubin John17fb4f22016-02-14 20:21:52 -080012577 IRQF_SHARED, dd->intx_name, dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012578 if (ret)
12579 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012580 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012581 else
12582 dd->requested_intx_irq = 1;
12583 return ret;
12584}
12585
12586static int request_msix_irqs(struct hfi1_devdata *dd)
12587{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012588 int first_general, last_general;
12589 int first_sdma, last_sdma;
12590 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012591 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012592
12593 /* calculate the ranges we are going to use */
12594 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080012595 last_general = first_general + 1;
12596 first_sdma = last_general;
12597 last_sdma = first_sdma + dd->num_sdma;
12598 first_rx = last_sdma;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012599 last_rx = first_rx + dd->n_krcv_queues;
12600
12601 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012602 * Sanity check - the code expects all SDMA chip source
12603 * interrupts to be in the same CSR, starting at bit 0. Verify
12604 * that this is true by checking the bit location of the start.
12605 */
12606 BUILD_BUG_ON(IS_SDMA_START % 64);
12607
12608 for (i = 0; i < dd->num_msix_entries; i++) {
12609 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12610 const char *err_info;
12611 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012612 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012613 void *arg;
12614 int idx;
12615 struct hfi1_ctxtdata *rcd = NULL;
12616 struct sdma_engine *sde = NULL;
12617
12618 /* obtain the arguments to request_irq */
12619 if (first_general <= i && i < last_general) {
12620 idx = i - first_general;
12621 handler = general_interrupt;
12622 arg = dd;
12623 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012624 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012625 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012626 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012627 } else if (first_sdma <= i && i < last_sdma) {
12628 idx = i - first_sdma;
12629 sde = &dd->per_sdma[idx];
12630 handler = sdma_interrupt;
12631 arg = sde;
12632 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012633 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012634 err_info = "sdma";
12635 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012636 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012637 } else if (first_rx <= i && i < last_rx) {
12638 idx = i - first_rx;
12639 rcd = dd->rcd[idx];
12640 /* no interrupt if no rcd */
12641 if (!rcd)
12642 continue;
12643 /*
12644 * Set the interrupt register and mask for this
12645 * context's interrupt.
12646 */
Jubin John8638b772016-02-14 20:19:24 -080012647 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012648 rcd->imask = ((u64)1) <<
Jubin John8638b772016-02-14 20:19:24 -080012649 ((IS_RCVAVAIL_START + idx) % 64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012650 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012651 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012652 arg = rcd;
12653 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012654 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012655 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012656 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012657 me->type = IRQ_RCVCTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012658 } else {
12659 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080012660 * ignore it
12661 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012662 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012663 "Unexpected extra MSI-X interrupt %d\n", i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012664 continue;
12665 }
12666 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080012667 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012668 continue;
12669 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080012670 me->name[sizeof(me->name) - 1] = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012671
Dean Luickf4f30031c2015-10-26 10:28:44 -040012672 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
Jubin John17fb4f22016-02-14 20:21:52 -080012673 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012674 if (ret) {
12675 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012676 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12677 err_info, me->msix.vector, idx, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012678 return ret;
12679 }
12680 /*
12681 * assign arg after request_irq call, so it will be
12682 * cleaned up
12683 */
12684 me->arg = arg;
12685
Mitko Haralanov957558c2016-02-03 14:33:40 -080012686 ret = hfi1_get_irq_affinity(dd, me);
12687 if (ret)
12688 dd_dev_err(dd,
12689 "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012690 }
12691
Mike Marciniszyn77241052015-07-30 15:17:43 -040012692 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012693}
12694
12695/*
12696 * Set the general handler to accept all interrupts, remap all
12697 * chip interrupts back to MSI-X 0.
12698 */
12699static void reset_interrupts(struct hfi1_devdata *dd)
12700{
12701 int i;
12702
12703 /* all interrupts handled by the general handler */
12704 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12705 dd->gi_mask[i] = ~(u64)0;
12706
12707 /* all chip interrupts map to MSI-X 0 */
12708 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012709 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012710}
12711
12712static int set_up_interrupts(struct hfi1_devdata *dd)
12713{
12714 struct hfi1_msix_entry *entries;
12715 u32 total, request;
12716 int i, ret;
12717 int single_interrupt = 0; /* we expect to have all the interrupts */
12718
12719 /*
12720 * Interrupt count:
12721 * 1 general, "slow path" interrupt (includes the SDMA engines
12722 * slow source, SDMACleanupDone)
12723 * N interrupts - one per used SDMA engine
 12724	 * M interrupts - one per kernel receive context
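	 *
	 * e.g. (illustrative): 16 SDMA engines and 8 kernel receive
	 * contexts would request 1 + 16 + 8 = 25 MSI-X vectors.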
12725 */
12726 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12727
12728 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12729 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012730 ret = -ENOMEM;
12731 goto fail;
12732 }
12733 /* 1-1 MSI-X entry assignment */
12734 for (i = 0; i < total; i++)
12735 entries[i].msix.entry = i;
12736
12737 /* ask for MSI-X interrupts */
12738 request = total;
12739 request_msix(dd, &request, entries);
12740
12741 if (request == 0) {
12742 /* using INTx */
12743 /* dd->num_msix_entries already zero */
12744 kfree(entries);
12745 single_interrupt = 1;
12746 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12747 } else {
12748 /* using MSI-X */
12749 dd->num_msix_entries = request;
12750 dd->msix_entries = entries;
12751
12752 if (request != total) {
12753 /* using MSI-X, with reduced interrupts */
12754 dd_dev_err(
12755 dd,
12756 "cannot handle reduced interrupt case, want %u, got %u\n",
12757 total, request);
12758 ret = -EINVAL;
12759 goto fail;
12760 }
12761 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12762 }
12763
12764 /* mask all interrupts */
12765 set_intr_state(dd, 0);
12766 /* clear all pending interrupts */
12767 clear_all_interrupts(dd);
12768
12769 /* reset general handler mask, chip MSI-X mappings */
12770 reset_interrupts(dd);
12771
12772 if (single_interrupt)
12773 ret = request_intx_irq(dd);
12774 else
12775 ret = request_msix_irqs(dd);
12776 if (ret)
12777 goto fail;
12778
12779 return 0;
12780
12781fail:
12782 clean_up_interrupts(dd);
12783 return ret;
12784}
12785
12786/*
12787 * Set up context values in dd. Sets:
12788 *
12789 * num_rcv_contexts - number of contexts being used
12790 * n_krcv_queues - number of kernel contexts
12791 * first_user_ctxt - first non-kernel context in array of contexts
12792 * freectxts - number of free user contexts
12793 * num_send_contexts - number of PIO send contexts being used
12794 */
12795static int set_up_context_variables(struct hfi1_devdata *dd)
12796{
12797 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012798 int total_contexts;
12799 int ret;
12800 unsigned ngroups;
Dean Luick8f000f72016-04-12 11:32:06 -070012801 int qos_rmt_count;
12802 int user_rmt_reduced;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012803
12804 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070012805 * Kernel receive contexts:
 12806	 * - at least 2, or one context per NUMA node, whichever is larger (excluding control context)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012807 * - Context 0 - control context (VL15/multicast/error)
Dean Luick33a9eb52016-04-12 10:50:22 -070012808 * - Context 1 - first kernel context
12809 * - Context 2 - second kernel context
12810 * ...
Mike Marciniszyn77241052015-07-30 15:17:43 -040012811 */
12812 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012813 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070012814 * n_krcvqs is the sum of module parameter kernel receive
12815 * contexts, krcvqs[]. It does not include the control
12816 * context, so add that.
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012817 */
Dean Luick33a9eb52016-04-12 10:50:22 -070012818 num_kernel_contexts = n_krcvqs + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012819 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012820 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012821 num_kernel_contexts =
12822 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
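	/*
	 * Example (module parameters assumed): krcvqs=2,2 gives
	 * n_krcvqs = 4, so five kernel contexts are requested: the
	 * control context plus four receive contexts, before the
	 * MIN_KERNEL_KCTXTS floor is applied.
	 */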
12823 /*
12824 * Every kernel receive context needs an ACK send context.
 12825	 * One send context is also allocated for each VL{0-7} and VL15.
12826 */
12827 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12828 dd_dev_err(dd,
12829 "Reducing # kernel rcv contexts to: %d, from %d\n",
12830 (int)(dd->chip_send_contexts - num_vls - 1),
12831 (int)num_kernel_contexts);
12832 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12833 }
12834 /*
Jubin John0852d242016-04-12 11:30:08 -070012835 * User contexts:
12836 * - default to 1 user context per real (non-HT) CPU core if
12837 * num_user_contexts is negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012838 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012839 if (num_user_contexts < 0)
Jubin John0852d242016-04-12 11:30:08 -070012840 num_user_contexts =
12841 cpumask_weight(&dd->affinity->real_cpu_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012842
12843 total_contexts = num_kernel_contexts + num_user_contexts;
12844
12845 /*
12846 * Adjust the counts given a global max.
12847 */
12848 if (total_contexts > dd->chip_rcv_contexts) {
12849 dd_dev_err(dd,
12850 "Reducing # user receive contexts to: %d, from %d\n",
12851 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12852 (int)num_user_contexts);
12853 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12854 /* recalculate */
12855 total_contexts = num_kernel_contexts + num_user_contexts;
12856 }
12857
Dean Luick8f000f72016-04-12 11:32:06 -070012858 /* each user context requires an entry in the RMT */
12859 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12860 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12861 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12862 dd_dev_err(dd,
12863 "RMT size is reducing the number of user receive contexts from %d to %d\n",
12864 (int)num_user_contexts,
12865 user_rmt_reduced);
12866 /* recalculate */
12867 num_user_contexts = user_rmt_reduced;
12868 total_contexts = num_kernel_contexts + num_user_contexts;
12869 }
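	/*
	 * Illustration (table sizes assumed): with a 256-entry RMT and 32
	 * entries consumed by QOS, at most 224 user contexts can get an
	 * RMT entry; anything above that was trimmed by the check above.
	 */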
12870
Mike Marciniszyn77241052015-07-30 15:17:43 -040012871 /* the first N are kernel contexts, the rest are user contexts */
12872 dd->num_rcv_contexts = total_contexts;
12873 dd->n_krcv_queues = num_kernel_contexts;
12874 dd->first_user_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080012875 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012876 dd->freectxts = num_user_contexts;
12877 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012878 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12879 (int)dd->chip_rcv_contexts,
12880 (int)dd->num_rcv_contexts,
12881 (int)dd->n_krcv_queues,
12882 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012883
12884 /*
12885 * Receive array allocation:
12886 * All RcvArray entries are divided into groups of 8. This
12887 * is required by the hardware and will speed up writes to
12888 * consecutive entries by using write-combining of the entire
12889 * cacheline.
12890 *
 12891	 * The number of groups is divided evenly among all contexts;
 12892	 * any leftover groups are given to the first N user
 12893	 * contexts.
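	 *
	 * Worked example (numbers assumed): a 32768-entry RcvArray in
	 * groups of 8 gives 4096 groups; spread over 40 contexts that is
	 * 102 groups per context, with the 16 leftover groups going to
	 * the first user contexts.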
12894 */
12895 dd->rcv_entries.group_size = RCV_INCREMENT;
12896 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12897 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12898 dd->rcv_entries.nctxt_extra = ngroups -
12899 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12900 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12901 dd->rcv_entries.ngroups,
12902 dd->rcv_entries.nctxt_extra);
12903 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12904 MAX_EAGER_ENTRIES * 2) {
12905 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12906 dd->rcv_entries.group_size;
12907 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012908 "RcvArray group count too high, change to %u\n",
12909 dd->rcv_entries.ngroups);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012910 dd->rcv_entries.nctxt_extra = 0;
12911 }
12912 /*
12913 * PIO send contexts
12914 */
12915 ret = init_sc_pools_and_sizes(dd);
12916 if (ret >= 0) { /* success */
12917 dd->num_send_contexts = ret;
12918 dd_dev_info(
12919 dd,
Jianxin Xiong44306f12016-04-12 11:30:28 -070012920 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040012921 dd->chip_send_contexts,
12922 dd->num_send_contexts,
12923 dd->sc_sizes[SC_KERNEL].count,
12924 dd->sc_sizes[SC_ACK].count,
Jianxin Xiong44306f12016-04-12 11:30:28 -070012925 dd->sc_sizes[SC_USER].count,
12926 dd->sc_sizes[SC_VL15].count);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012927 ret = 0; /* success */
12928 }
12929
12930 return ret;
12931}
12932
12933/*
12934 * Set the device/port partition key table. The MAD code
12935 * will ensure that, at least, the partial management
12936 * partition key is present in the table.
12937 */
12938static void set_partition_keys(struct hfi1_pportdata *ppd)
12939{
12940 struct hfi1_devdata *dd = ppd->dd;
12941 u64 reg = 0;
12942 int i;
12943
12944 dd_dev_info(dd, "Setting partition keys\n");
12945 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12946 reg |= (ppd->pkeys[i] &
12947 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12948 ((i % 4) *
12949 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12950 /* Each register holds 4 PKey values. */
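		/*
		 * e.g. pkeys[0..3] pack into the first CSR and pkeys[4..7]
		 * into the next 8-byte CSR: offset (7 - 3) * 2 = 8.
		 */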
12951 if ((i % 4) == 3) {
12952 write_csr(dd, RCV_PARTITION_KEY +
12953 ((i - 3) * 2), reg);
12954 reg = 0;
12955 }
12956 }
12957
12958 /* Always enable HW pkeys check when pkeys table is set */
12959 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12960}
12961
12962/*
12963 * These CSRs and memories are uninitialized on reset and must be
12964 * written before reading to set the ECC/parity bits.
12965 *
 12966	 * NOTE: All user context CSRs that are not mmapped write-only
12967 * (e.g. the TID flows) must be initialized even if the driver never
12968 * reads them.
12969 */
12970static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12971{
12972 int i, j;
12973
12974 /* CceIntMap */
12975 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012976 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012977
12978 /* SendCtxtCreditReturnAddr */
12979 for (i = 0; i < dd->chip_send_contexts; i++)
12980 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12981
12982 /* PIO Send buffers */
12983 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080012984 /*
12985 * These are not normally read, and (presently) have no method
12986 * to be read, so are not pre-initialized
12987 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012988
12989 /* RcvHdrAddr */
12990 /* RcvHdrTailAddr */
12991 /* RcvTidFlowTable */
12992 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12993 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12994 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12995 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080012996 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012997 }
12998
12999 /* RcvArray */
13000 for (i = 0; i < dd->chip_rcv_array_count; i++)
Jubin John8638b772016-02-14 20:19:24 -080013001 write_csr(dd, RCV_ARRAY + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013002 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013003
13004 /* RcvQPMapTable */
13005 for (i = 0; i < 32; i++)
13006 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13007}
13008
13009/*
13010 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13011 */
13012static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13013 u64 ctrl_bits)
13014{
13015 unsigned long timeout;
13016 u64 reg;
13017
13018 /* is the condition present? */
13019 reg = read_csr(dd, CCE_STATUS);
13020 if ((reg & status_bits) == 0)
13021 return;
13022
13023 /* clear the condition */
13024 write_csr(dd, CCE_CTRL, ctrl_bits);
13025
13026 /* wait for the condition to clear */
13027 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13028 while (1) {
13029 reg = read_csr(dd, CCE_STATUS);
13030 if ((reg & status_bits) == 0)
13031 return;
13032 if (time_after(jiffies, timeout)) {
13033 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013034 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13035 status_bits, reg & status_bits);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013036 return;
13037 }
13038 udelay(1);
13039 }
13040}
13041
13042/* set CCE CSRs to chip reset defaults */
13043static void reset_cce_csrs(struct hfi1_devdata *dd)
13044{
13045 int i;
13046
13047 /* CCE_REVISION read-only */
13048 /* CCE_REVISION2 read-only */
13049 /* CCE_CTRL - bits clear automatically */
13050 /* CCE_STATUS read-only, use CceCtrl to clear */
13051 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13052 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13053 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13054 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13055 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13056 /* CCE_ERR_STATUS read-only */
13057 write_csr(dd, CCE_ERR_MASK, 0);
13058 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13059 /* CCE_ERR_FORCE leave alone */
13060 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13061 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13062 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13063 /* CCE_PCIE_CTRL leave alone */
13064 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13065 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13066 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013067 CCE_MSIX_TABLE_UPPER_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013068 }
13069 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13070 /* CCE_MSIX_PBA read-only */
13071 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13072 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13073 }
13074 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13075 write_csr(dd, CCE_INT_MAP, 0);
13076 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13077 /* CCE_INT_STATUS read-only */
13078 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13079 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13080 /* CCE_INT_FORCE leave alone */
13081 /* CCE_INT_BLOCKED read-only */
13082 }
13083 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13084 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13085}
13086
Mike Marciniszyn77241052015-07-30 15:17:43 -040013087/* set MISC CSRs to chip reset defaults */
13088static void reset_misc_csrs(struct hfi1_devdata *dd)
13089{
13090 int i;
13091
13092 for (i = 0; i < 32; i++) {
13093 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13094 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13095 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13096 }
Jubin John4d114fd2016-02-14 20:21:43 -080013097 /*
13098 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
 13099	 * only be written in 128-byte chunks
13100 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013101 /* init RSA engine to clear lingering errors */
13102 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13103 write_csr(dd, MISC_CFG_RSA_MU, 0);
13104 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13105 /* MISC_STS_8051_DIGEST read-only */
13106 /* MISC_STS_SBM_DIGEST read-only */
13107 /* MISC_STS_PCIE_DIGEST read-only */
13108 /* MISC_STS_FAB_DIGEST read-only */
13109 /* MISC_ERR_STATUS read-only */
13110 write_csr(dd, MISC_ERR_MASK, 0);
13111 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13112 /* MISC_ERR_FORCE leave alone */
13113}
13114
13115/* set TXE CSRs to chip reset defaults */
13116static void reset_txe_csrs(struct hfi1_devdata *dd)
13117{
13118 int i;
13119
13120 /*
13121 * TXE Kernel CSRs
13122 */
13123 write_csr(dd, SEND_CTRL, 0);
13124 __cm_reset(dd, 0); /* reset CM internal state */
13125 /* SEND_CONTEXTS read-only */
13126 /* SEND_DMA_ENGINES read-only */
13127 /* SEND_PIO_MEM_SIZE read-only */
13128 /* SEND_DMA_MEM_SIZE read-only */
13129 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13130 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13131 /* SEND_PIO_ERR_STATUS read-only */
13132 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13133 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13134 /* SEND_PIO_ERR_FORCE leave alone */
13135 /* SEND_DMA_ERR_STATUS read-only */
13136 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13137 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13138 /* SEND_DMA_ERR_FORCE leave alone */
13139 /* SEND_EGRESS_ERR_STATUS read-only */
13140 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13141 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13142 /* SEND_EGRESS_ERR_FORCE leave alone */
13143 write_csr(dd, SEND_BTH_QP, 0);
13144 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13145 write_csr(dd, SEND_SC2VLT0, 0);
13146 write_csr(dd, SEND_SC2VLT1, 0);
13147 write_csr(dd, SEND_SC2VLT2, 0);
13148 write_csr(dd, SEND_SC2VLT3, 0);
13149 write_csr(dd, SEND_LEN_CHECK0, 0);
13150 write_csr(dd, SEND_LEN_CHECK1, 0);
13151 /* SEND_ERR_STATUS read-only */
13152 write_csr(dd, SEND_ERR_MASK, 0);
13153 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13154 /* SEND_ERR_FORCE read-only */
13155 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013156 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013157 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013158 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13159 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13160 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013161 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013162 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013163 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013164 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013165 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
Jubin John17fb4f22016-02-14 20:21:52 -080013166 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013167 /* SEND_CM_CREDIT_USED_STATUS read-only */
13168 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13169 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13170 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13171 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13172 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13173 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013174 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013175 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13176 /* SEND_CM_CREDIT_USED_VL read-only */
13177 /* SEND_CM_CREDIT_USED_VL15 read-only */
13178 /* SEND_EGRESS_CTXT_STATUS read-only */
13179 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13180 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13181 /* SEND_EGRESS_ERR_INFO read-only */
13182 /* SEND_EGRESS_ERR_SOURCE read-only */
13183
13184 /*
13185 * TXE Per-Context CSRs
13186 */
13187 for (i = 0; i < dd->chip_send_contexts; i++) {
13188 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13189 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13190 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13191 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13192 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13193 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13194 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13195 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13196 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13197 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13198 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13199 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13200 }
13201
13202 /*
13203 * TXE Per-SDMA CSRs
13204 */
13205 for (i = 0; i < dd->chip_sdma_engines; i++) {
13206 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13207 /* SEND_DMA_STATUS read-only */
13208 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13209 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13210 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13211 /* SEND_DMA_HEAD read-only */
13212 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13213 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13214 /* SEND_DMA_IDLE_CNT read-only */
13215 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13216 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13217 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13218 /* SEND_DMA_ENG_ERR_STATUS read-only */
13219 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13220 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13221 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13222 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13223 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13224 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13225 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13226 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13227 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13228 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13229 }
13230}
13231
13232/*
13233 * Expect on entry:
13234 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13235 */
13236static void init_rbufs(struct hfi1_devdata *dd)
13237{
13238 u64 reg;
13239 int count;
13240
13241 /*
13242 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13243 * clear.
13244 */
13245 count = 0;
13246 while (1) {
13247 reg = read_csr(dd, RCV_STATUS);
13248 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13249 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13250 break;
13251 /*
13252 * Give up after 1ms - maximum wait time.
13253 *
13254 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13255 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13256 * 148 KB / (66% * 250MB/s) = 920us
13257 */
13258 if (count++ > 500) {
13259 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013260 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13261 __func__, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013262 break;
13263 }
13264 udelay(2); /* do not busy-wait the CSR */
13265 }
13266
13267 /* start the init - expect RcvCtrl to be 0 */
13268 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13269
13270 /*
 13271	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
 13272	 * period after the write before RcvStatus.RxRbufInitDone is valid.
 13273	 * The delay in the first run through the loop below is sufficient and
 13274	 * required before the first read of RcvStatus.RxRbufInitDone.
13275 */
13276 read_csr(dd, RCV_CTRL);
13277
13278 /* wait for the init to finish */
13279 count = 0;
13280 while (1) {
13281 /* delay is required first time through - see above */
13282 udelay(2); /* do not busy-wait the CSR */
13283 reg = read_csr(dd, RCV_STATUS);
13284 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13285 break;
13286
13287 /* give up after 100us - slowest possible at 33MHz is 73us */
13288 if (count++ > 50) {
13289 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013290 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13291 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013292 break;
13293 }
13294 }
13295}
13296
13297/* set RXE CSRs to chip reset defaults */
13298static void reset_rxe_csrs(struct hfi1_devdata *dd)
13299{
13300 int i, j;
13301
13302 /*
13303 * RXE Kernel CSRs
13304 */
13305 write_csr(dd, RCV_CTRL, 0);
13306 init_rbufs(dd);
13307 /* RCV_STATUS read-only */
13308 /* RCV_CONTEXTS read-only */
13309 /* RCV_ARRAY_CNT read-only */
13310 /* RCV_BUF_SIZE read-only */
13311 write_csr(dd, RCV_BTH_QP, 0);
13312 write_csr(dd, RCV_MULTICAST, 0);
13313 write_csr(dd, RCV_BYPASS, 0);
13314 write_csr(dd, RCV_VL15, 0);
13315 /* this is a clear-down */
13316 write_csr(dd, RCV_ERR_INFO,
Jubin John17fb4f22016-02-14 20:21:52 -080013317 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013318 /* RCV_ERR_STATUS read-only */
13319 write_csr(dd, RCV_ERR_MASK, 0);
13320 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13321 /* RCV_ERR_FORCE leave alone */
13322 for (i = 0; i < 32; i++)
13323 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13324 for (i = 0; i < 4; i++)
13325 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13326 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13327 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13328 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13329 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13330 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13331 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13332 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13333 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13334 }
13335 for (i = 0; i < 32; i++)
13336 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13337
13338 /*
13339 * RXE Kernel and User Per-Context CSRs
13340 */
13341 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13342 /* kernel */
13343 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13344 /* RCV_CTXT_STATUS read-only */
13345 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13346 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13347 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13348 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13349 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13350 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13351 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13352 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13353 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13354 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13355
13356 /* user */
13357 /* RCV_HDR_TAIL read-only */
13358 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13359 /* RCV_EGR_INDEX_TAIL read-only */
13360 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13361 /* RCV_EGR_OFFSET_TAIL read-only */
13362 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
Jubin John17fb4f22016-02-14 20:21:52 -080013363 write_uctxt_csr(dd, i,
13364 RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013365 }
13366 }
13367}
13368
13369/*
13370 * Set sc2vl tables.
13371 *
13372 * They power on to zeros, so to avoid send context errors
13373 * they need to be set:
13374 *
13375 * SC 0-7 -> VL 0-7 (respectively)
13376 * SC 15 -> VL 15
13377 * otherwise
13378 * -> VL 0
13379 */
13380static void init_sc2vl_tables(struct hfi1_devdata *dd)
13381{
13382 int i;
13383 /* init per architecture spec, constrained by hardware capability */
13384
13385 /* HFI maps sent packets */
13386 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13387 0,
13388 0, 0, 1, 1,
13389 2, 2, 3, 3,
13390 4, 4, 5, 5,
13391 6, 6, 7, 7));
13392 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13393 1,
13394 8, 0, 9, 0,
13395 10, 0, 11, 0,
13396 12, 0, 13, 0,
13397 14, 0, 15, 15));
13398 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13399 2,
13400 16, 0, 17, 0,
13401 18, 0, 19, 0,
13402 20, 0, 21, 0,
13403 22, 0, 23, 0));
13404 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13405 3,
13406 24, 0, 25, 0,
13407 26, 0, 27, 0,
13408 28, 0, 29, 0,
13409 30, 0, 31, 0));
13410
13411 /* DC maps received packets */
13412 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13413 15_0,
13414 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13415 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13416 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13417 31_16,
13418 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13419 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13420
13421 /* initialize the cached sc2vl values consistently with h/w */
13422 for (i = 0; i < 32; i++) {
13423 if (i < 8 || i == 15)
13424 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13425 else
13426 *((u8 *)(dd->sc2vl) + i) = 0;
13427 }
13428}
13429
13430/*
13431 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13432 * depend on the chip going through a power-on reset - a driver may be loaded
13433 * and unloaded many times.
13434 *
13435 * Do not write any CSR values to the chip in this routine - there may be
13436 * a reset following the (possible) FLR in this routine.
13437 *
13438 */
13439static void init_chip(struct hfi1_devdata *dd)
13440{
13441 int i;
13442
13443 /*
13444 * Put the HFI CSRs in a known state.
13445 * Combine this with a DC reset.
13446 *
13447 * Stop the device from doing anything while we do a
13448 * reset. We know there are no other active users of
 13449	 * the device since we are now in charge. Turn off
 13450	 * all outbound and inbound traffic and make sure
13451 * the device does not generate any interrupts.
13452 */
13453
13454 /* disable send contexts and SDMA engines */
13455 write_csr(dd, SEND_CTRL, 0);
13456 for (i = 0; i < dd->chip_send_contexts; i++)
13457 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13458 for (i = 0; i < dd->chip_sdma_engines; i++)
13459 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13460 /* disable port (turn off RXE inbound traffic) and contexts */
13461 write_csr(dd, RCV_CTRL, 0);
13462 for (i = 0; i < dd->chip_rcv_contexts; i++)
13463 write_csr(dd, RCV_CTXT_CTRL, 0);
13464 /* mask all interrupt sources */
13465 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013466 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013467
13468 /*
13469 * DC Reset: do a full DC reset before the register clear.
13470 * A recommended length of time to hold is one CSR read,
13471 * so reread the CceDcCtrl. Then, hold the DC in reset
13472 * across the clear.
13473 */
13474 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013475 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013476
13477 if (use_flr) {
13478 /*
13479 * A FLR will reset the SPC core and part of the PCIe.
13480 * The parts that need to be restored have already been
13481 * saved.
13482 */
13483 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13484
13485 /* do the FLR, the DC reset will remain */
13486 hfi1_pcie_flr(dd);
13487
13488 /* restore command and BARs */
13489 restore_pci_variables(dd);
13490
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013491 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013492 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13493 hfi1_pcie_flr(dd);
13494 restore_pci_variables(dd);
13495 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013496 } else {
13497 dd_dev_info(dd, "Resetting CSRs with writes\n");
13498 reset_cce_csrs(dd);
13499 reset_txe_csrs(dd);
13500 reset_rxe_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013501 reset_misc_csrs(dd);
13502 }
13503 /* clear the DC reset */
13504 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013505
Mike Marciniszyn77241052015-07-30 15:17:43 -040013506 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013507 setextled(dd, 0);
13508
Mike Marciniszyn77241052015-07-30 15:17:43 -040013509 /*
13510 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013511 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013512	 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, holding
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013513	 * anything plugged in constantly in reset if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013514	 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013515 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013516 * I2CCLK and I2CDAT will change per direction, and INT_N and
13517 * MODPRS_N are input only and their value is ignored.
13518 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013519 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13520 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Dean Luicka2ee27a2016-03-05 08:49:50 -080013521 init_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013522}
13523
13524static void init_early_variables(struct hfi1_devdata *dd)
13525{
13526 int i;
13527
13528 /* assign link credit variables */
13529 dd->vau = CM_VAU;
13530 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013531 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013532 dd->link_credits--;
13533 dd->vcu = cu_to_vcu(hfi1_cu);
13534 /* enough room for 8 MAD packets plus header - 17K */
13535 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13536 if (dd->vl15_init > dd->link_credits)
13537 dd->vl15_init = dd->link_credits;
13538
13539 write_uninitialized_csrs_and_memories(dd);
13540
13541 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13542 for (i = 0; i < dd->num_pports; i++) {
13543 struct hfi1_pportdata *ppd = &dd->pport[i];
13544
13545 set_partition_keys(ppd);
13546 }
13547 init_sc2vl_tables(dd);
13548}
13549
13550static void init_kdeth_qp(struct hfi1_devdata *dd)
13551{
13552 /* user changed the KDETH_QP */
13553 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13554 /* out of range or illegal value */
13555 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13556 kdeth_qp = 0;
13557 }
13558 if (kdeth_qp == 0) /* not set, or failed range check */
13559 kdeth_qp = DEFAULT_KDETH_QP;
13560
13561 write_csr(dd, SEND_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013562 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13563 SEND_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013564
13565 write_csr(dd, RCV_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013566 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13567 RCV_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013568}
13569
13570/**
13571 * init_qpmap_table
13572 * @dd - device data
13573 * @first_ctxt - first context
 13574	 * @last_ctxt - last context
13575 *
 13576	 * This routine sets the qpn mapping table that
13577 * is indexed by qpn[8:1].
13578 *
13579 * The routine will round robin the 256 settings
13580 * from first_ctxt to last_ctxt.
13581 *
13582 * The first/last looks ahead to having specialized
13583 * receive contexts for mgmt and bypass. Normal
 13584	 * verbs traffic is assumed to be on a range
13585 * of receive contexts.
13586 */
13587static void init_qpmap_table(struct hfi1_devdata *dd,
13588 u32 first_ctxt,
13589 u32 last_ctxt)
13590{
13591 u64 reg = 0;
13592 u64 regno = RCV_QP_MAP_TABLE;
13593 int i;
13594 u64 ctxt = first_ctxt;
13595
Dean Luick60d585ad2016-04-12 10:50:35 -070013596 for (i = 0; i < 256; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013597 reg |= ctxt << (8 * (i % 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -040013598 ctxt++;
13599 if (ctxt > last_ctxt)
13600 ctxt = first_ctxt;
Dean Luick60d585ad2016-04-12 10:50:35 -070013601 if (i % 8 == 7) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013602 write_csr(dd, regno, reg);
13603 reg = 0;
13604 regno += 8;
13605 }
13606 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013607
13608 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13609 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13610}
13611
Dean Luick372cc85a2016-04-12 11:30:51 -070013612struct rsm_map_table {
13613 u64 map[NUM_MAP_REGS];
13614 unsigned int used;
13615};
13616
Dean Luickb12349a2016-04-12 11:31:33 -070013617struct rsm_rule_data {
13618 u8 offset;
13619 u8 pkt_type;
13620 u32 field1_off;
13621 u32 field2_off;
13622 u32 index1_off;
13623 u32 index1_width;
13624 u32 index2_off;
13625 u32 index2_width;
13626 u32 mask1;
13627 u32 value1;
13628 u32 mask2;
13629 u32 value2;
13630};
13631
Dean Luick372cc85a2016-04-12 11:30:51 -070013632/*
13633 * Return an initialized RMT map table for users to fill in. OK if it
13634 * returns NULL, indicating no table.
13635 */
13636static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13637{
13638 struct rsm_map_table *rmt;
13639 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13640
13641 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13642 if (rmt) {
13643 memset(rmt->map, rxcontext, sizeof(rmt->map));
13644 rmt->used = 0;
13645 }
13646
13647 return rmt;
13648}
13649
13650/*
13651 * Write the final RMT map table to the chip and free the table. OK if
13652 * table is NULL.
13653 */
13654static void complete_rsm_map_table(struct hfi1_devdata *dd,
13655 struct rsm_map_table *rmt)
13656{
13657 int i;
13658
13659 if (rmt) {
13660 /* write table to chip */
13661 for (i = 0; i < NUM_MAP_REGS; i++)
13662 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13663
13664 /* enable RSM */
13665 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13666 }
13667}
13668
Dean Luickb12349a2016-04-12 11:31:33 -070013669/*
13670 * Add a receive side mapping rule.
13671 */
13672static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13673 struct rsm_rule_data *rrd)
13674{
13675 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13676 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13677 1ull << rule_index | /* enable bit */
13678 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13679 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13680 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13681 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13682 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13683 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13684 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13685 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13686 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13687 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13688 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13689 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13690 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13691}
13692
Dean Luick4a818be2016-04-12 11:31:11 -070013693/* return the number of RSM map table entries that will be used for QOS */
13694static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13695 unsigned int *np)
13696{
13697 int i;
13698 unsigned int m, n;
13699 u8 max_by_vl = 0;
13700
13701 /* is QOS active at all? */
13702 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13703 num_vls == 1 ||
13704 krcvqsset <= 1)
13705 goto no_qos;
13706
13707 /* determine bits for qpn */
13708 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13709 if (krcvqs[i] > max_by_vl)
13710 max_by_vl = krcvqs[i];
13711 if (max_by_vl > 32)
13712 goto no_qos;
13713 m = ilog2(__roundup_pow_of_two(max_by_vl));
13714
13715 /* determine bits for vl */
13716 n = ilog2(__roundup_pow_of_two(num_vls));
13717
13718 /* reject if too much is used */
13719 if ((m + n) > 7)
13720 goto no_qos;
13721
13722 if (mp)
13723 *mp = m;
13724 if (np)
13725 *np = n;
13726
13727 return 1 << (m + n);
13728
13729no_qos:
13730 if (mp)
13731 *mp = 0;
13732 if (np)
13733 *np = 0;
13734 return 0;
13735}
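
/*
 * Worked example for qos_rmt_entries() (module parameters assumed):
 * krcvqs = 2,2,2 over num_vls = 3 gives max_by_vl = 2, so m = 1 qpn
 * bit and n = 2 vl bits, and the QOS portion of the RMT needs
 * 1 << (1 + 2) = 8 entries.
 */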
13736
Mike Marciniszyn77241052015-07-30 15:17:43 -040013737/**
13738 * init_qos - init RX qos
13739 * @dd - device data
Dean Luick372cc85a2016-04-12 11:30:51 -070013740 * @rmt - RSM map table
Mike Marciniszyn77241052015-07-30 15:17:43 -040013741 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013742 * This routine initializes Rule 0 and the RSM map table to implement
13743 * quality of service (qos).
Mike Marciniszyn77241052015-07-30 15:17:43 -040013744 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013745 * If all of the limit tests succeed, qos is applied based on the array
13746 * interpretation of krcvqs where entry 0 is VL0.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013747 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013748 * The number of vl bits (n) and the number of qpn bits (m) are computed to
13749 * feed both the RSM map table and the single rule.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013750 */
Dean Luick372cc85a2016-04-12 11:30:51 -070013751static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013752{
Dean Luickb12349a2016-04-12 11:31:33 -070013753 struct rsm_rule_data rrd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013754 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
Dean Luick372cc85a2016-04-12 11:30:51 -070013755 unsigned int rmt_entries;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013756 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013757
Dean Luick4a818be2016-04-12 11:31:11 -070013758 if (!rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013759 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013760 rmt_entries = qos_rmt_entries(dd, &m, &n);
13761 if (rmt_entries == 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013762 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013763 qpns_per_vl = 1 << m;
13764
Dean Luick372cc85a2016-04-12 11:30:51 -070013765 /* enough room in the map table? */
13766 rmt_entries = 1 << (m + n);
13767 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013768 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013769
Dean Luick372cc85a2016-04-12 11:30:51 -070013770	/* add qos entries to the RSM map table */
Dean Luick33a9eb52016-04-12 10:50:22 -070013771 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013772 unsigned tctxt;
13773
13774 for (qpn = 0, tctxt = ctxt;
13775 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13776 unsigned idx, regoff, regidx;
13777
Dean Luick372cc85a2016-04-12 11:30:51 -070013778 /* generate the index the hardware will produce */
13779 idx = rmt->used + ((qpn << n) ^ i);
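			/*
			 * e.g. with n == 2, VL 1 lands on map entries
			 * rmt->used + 1, 5, 9, ... as qpn increments:
			 * (0 << 2) ^ 1, (1 << 2) ^ 1, (2 << 2) ^ 1, ...
			 */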
Mike Marciniszyn77241052015-07-30 15:17:43 -040013780 regoff = (idx % 8) * 8;
13781 regidx = idx / 8;
Dean Luick372cc85a2016-04-12 11:30:51 -070013782 /* replace default with context number */
13783 reg = rmt->map[regidx];
Mike Marciniszyn77241052015-07-30 15:17:43 -040013784 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13785 << regoff);
13786 reg |= (u64)(tctxt++) << regoff;
Dean Luick372cc85a2016-04-12 11:30:51 -070013787 rmt->map[regidx] = reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013788 if (tctxt == ctxt + krcvqs[i])
13789 tctxt = ctxt;
13790 }
13791 ctxt += krcvqs[i];
13792 }
Dean Luickb12349a2016-04-12 11:31:33 -070013793
13794 rrd.offset = rmt->used;
13795 rrd.pkt_type = 2;
13796 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13797 rrd.field2_off = LRH_SC_MATCH_OFFSET;
13798 rrd.index1_off = LRH_SC_SELECT_OFFSET;
13799 rrd.index1_width = n;
13800 rrd.index2_off = QPN_SELECT_OFFSET;
13801 rrd.index2_width = m + n;
13802 rrd.mask1 = LRH_BTH_MASK;
13803 rrd.value1 = LRH_BTH_VALUE;
13804 rrd.mask2 = LRH_SC_MASK;
13805 rrd.value2 = LRH_SC_VALUE;
13806
13807 /* add rule 0 */
13808 add_rsm_rule(dd, 0, &rrd);
13809
Dean Luick372cc85a2016-04-12 11:30:51 -070013810 /* mark RSM map entries as used */
13811 rmt->used += rmt_entries;
Dean Luick33a9eb52016-04-12 10:50:22 -070013812 /* map everything else to the mcast/err/vl15 context */
13813 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013814 dd->qos_shift = n + 1;
13815 return;
13816bail:
13817 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013818 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013819}
13820
Dean Luick8f000f72016-04-12 11:32:06 -070013821static void init_user_fecn_handling(struct hfi1_devdata *dd,
13822 struct rsm_map_table *rmt)
13823{
13824 struct rsm_rule_data rrd;
13825 u64 reg;
13826 int i, idx, regoff, regidx;
13827 u8 offset;
13828
13829 /* there needs to be enough room in the map table */
13830 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13831 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13832 return;
13833 }
13834
13835 /*
13836 * RSM will extract the destination context as an index into the
13837 * map table. The destination contexts are a sequential block
13838 * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
13839 * Map entries are accessed as offset + extracted value. Adjust
13840 * the added offset so this sequence can be placed anywhere in
13841 * the table - as long as the entries themselves do not wrap.
13842 * There are only enough bits in offset for the table size, so
13843 * start with that to allow for a "negative" offset.
13844 */
13845 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13846 (int)dd->first_user_ctxt);
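
	/*
	 * e.g. (hypothetical values) with NUM_MAP_ENTRIES == 256,
	 * rmt->used == 8 and first_user_ctxt == 3: offset = (u8)261 = 5,
	 * so extracted context 3 maps to entry 5 + 3 = 8 == rmt->used.
	 */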
13847
13848 for (i = dd->first_user_ctxt, idx = rmt->used;
13849 i < dd->num_rcv_contexts; i++, idx++) {
13850 /* replace with identity mapping */
13851 regoff = (idx % 8) * 8;
13852 regidx = idx / 8;
13853 reg = rmt->map[regidx];
13854 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13855 reg |= (u64)i << regoff;
13856 rmt->map[regidx] = reg;
13857 }
13858
13859 /*
13860 * For RSM intercept of Expected FECN packets:
13861 * o packet type 0 - expected
13862 * o match on F (bit 95), using select/match 1, and
13863 * o match on SH (bit 133), using select/match 2.
13864 *
13865 * Use index 1 to extract the 8-bit receive context from DestQP
13866 * (start at bit 64). Use that as the RSM map table index.
13867 */
13868 rrd.offset = offset;
13869 rrd.pkt_type = 0;
13870 rrd.field1_off = 95;
13871 rrd.field2_off = 133;
13872 rrd.index1_off = 64;
13873 rrd.index1_width = 8;
13874 rrd.index2_off = 0;
13875 rrd.index2_width = 0;
13876 rrd.mask1 = 1;
13877 rrd.value1 = 1;
13878 rrd.mask2 = 1;
13879 rrd.value2 = 1;
13880
13881 /* add rule 1 */
13882 add_rsm_rule(dd, 1, &rrd);
13883
13884 rmt->used += dd->num_user_contexts;
13885}
13886
Mike Marciniszyn77241052015-07-30 15:17:43 -040013887static void init_rxe(struct hfi1_devdata *dd)
13888{
Dean Luick372cc85a2016-04-12 11:30:51 -070013889 struct rsm_map_table *rmt;
13890
Mike Marciniszyn77241052015-07-30 15:17:43 -040013891 /* enable all receive errors */
13892 write_csr(dd, RCV_ERR_MASK, ~0ull);
Dean Luick372cc85a2016-04-12 11:30:51 -070013893
13894 rmt = alloc_rsm_map_table(dd);
13895 /* set up QOS, including the QPN map table */
13896 init_qos(dd, rmt);
Dean Luick8f000f72016-04-12 11:32:06 -070013897 init_user_fecn_handling(dd, rmt);
Dean Luick372cc85a2016-04-12 11:30:51 -070013898 complete_rsm_map_table(dd, rmt);
13899 kfree(rmt);
13900
Mike Marciniszyn77241052015-07-30 15:17:43 -040013901 /*
13902 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13903 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13904 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13905 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13906 * Max_PayLoad_Size set to its minimum of 128.
13907 *
13908 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13909 * (64 bytes). Max_Payload_Size is possibly modified upward in
13910 * tune_pcie_caps() which is called after this routine.
13911 */
13912}
13913
13914static void init_other(struct hfi1_devdata *dd)
13915{
13916 /* enable all CCE errors */
13917 write_csr(dd, CCE_ERR_MASK, ~0ull);
13918 /* enable *some* Misc errors */
13919 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13920 /* enable all DC errors, except LCB */
13921 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13922 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13923}
13924
13925/*
13926 * Fill out the given AU table using the given CU. A CU is defined in terms of
13927 * AUs. The table is an encoding: given the index, how many AUs does that
13928 * represent?
13929 *
13930 * NOTE: Assumes that the register layout is the same for the
13931 * local and remote tables.
13932 */
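/*
 * For example, if cu were 2 the eight entries written below would encode
 * 0, 1, 4, 8, 16, 32, 64 and 128 AUs for indices 0 through 7.
 */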
13933static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13934 u32 csr0to3, u32 csr4to7)
13935{
13936 write_csr(dd, csr0to3,
Jubin John17fb4f22016-02-14 20:21:52 -080013937 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13938 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13939 2ull * cu <<
13940 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13941 4ull * cu <<
13942 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013943 write_csr(dd, csr4to7,
Jubin John17fb4f22016-02-14 20:21:52 -080013944 8ull * cu <<
13945 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13946 16ull * cu <<
13947 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13948 32ull * cu <<
13949 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13950 64ull * cu <<
13951 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013952}
13953
13954static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13955{
13956 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080013957 SEND_CM_LOCAL_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013958}
13959
13960void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13961{
13962 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080013963 SEND_CM_REMOTE_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013964}
13965
13966static void init_txe(struct hfi1_devdata *dd)
13967{
13968 int i;
13969
13970 /* enable all PIO, SDMA, general, and Egress errors */
13971 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13972 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13973 write_csr(dd, SEND_ERR_MASK, ~0ull);
13974 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13975
13976 /* enable all per-context and per-SDMA engine errors */
13977 for (i = 0; i < dd->chip_send_contexts; i++)
13978 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13979 for (i = 0; i < dd->chip_sdma_engines; i++)
13980 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13981
13982 /* set the local CU to AU mapping */
13983 assign_local_cm_au_table(dd, dd->vcu);
13984
13985 /*
13986 * Set reasonable default for Credit Return Timer
13987 * Don't set on Simulator - causes it to choke.
13988 */
13989 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13990 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13991}
13992
13993int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13994{
13995 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13996 unsigned sctxt;
13997 int ret = 0;
13998 u64 reg;
13999
14000 if (!rcd || !rcd->sc) {
14001 ret = -EINVAL;
14002 goto done;
14003 }
14004 sctxt = rcd->sc->hw_context;
14005 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14006 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14007 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14008 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14009 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14010 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14011 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14012 /*
14013 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040014014 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014015 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014016 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14017 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14018 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14019 }
14020
14021 /* Enable J_KEY check on receive context. */
14022 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14023 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14024 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14025 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
14026done:
14027 return ret;
14028}
14029
14030int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14031{
14032 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14033 unsigned sctxt;
14034 int ret = 0;
14035 u64 reg;
14036
14037 if (!rcd || !rcd->sc) {
14038 ret = -EINVAL;
14039 goto done;
14040 }
14041 sctxt = rcd->sc->hw_context;
14042 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14043 /*
14044 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14045 * This check would not have been enabled for A0 h/w, see
14046	 * hfi1_set_ctxt_jkey().
14047 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014048 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014049 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14050 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14051 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14052 }
14053 /* Turn off the J_KEY on the receive side */
14054 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14055done:
14056 return ret;
14057}
14058
14059int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14060{
14061 struct hfi1_ctxtdata *rcd;
14062 unsigned sctxt;
14063 int ret = 0;
14064 u64 reg;
14065
Jubin Johne4909742016-02-14 20:22:00 -080014066 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014067 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014068 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014069 ret = -EINVAL;
14070 goto done;
14071 }
14072 if (!rcd || !rcd->sc) {
14073 ret = -EINVAL;
14074 goto done;
14075 }
14076 sctxt = rcd->sc->hw_context;
14077 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14078 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14079 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14080 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14081 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Sebastian Sancheze38d1e42016-04-12 11:22:21 -070014082 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014083 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14084done:
14085 return ret;
14086}
14087
14088int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14089{
14090 struct hfi1_ctxtdata *rcd;
14091 unsigned sctxt;
14092 int ret = 0;
14093 u64 reg;
14094
Jubin Johne4909742016-02-14 20:22:00 -080014095 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014096 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014097 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014098 ret = -EINVAL;
14099 goto done;
14100 }
14101 if (!rcd || !rcd->sc) {
14102 ret = -EINVAL;
14103 goto done;
14104 }
14105 sctxt = rcd->sc->hw_context;
14106 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14107 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14108 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14109 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14110done:
14111 return ret;
14112}
14113
14114/*
14115 * Start the clean up of the chip. Our clean up happens in multiple
14116 * stages and this is just the first.
14117 */
14118void hfi1_start_cleanup(struct hfi1_devdata *dd)
14119{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080014120 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014121 free_cntrs(dd);
14122 free_rcverr(dd);
14123 clean_up_interrupts(dd);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014124 finish_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014125}
14126
14127#define HFI_BASE_GUID(dev) \
14128 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
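/* the per-HFI GUID index bit is masked off so both HFIs on an ASIC compare equal */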
14129
14130/*
Dean Luick78eb1292016-03-05 08:49:45 -080014131 * Information can be shared between the two HFIs on the same ASIC
14132 * in the same OS. This function finds the peer device and sets
14133 * up a shared structure.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014134 */
Dean Luick78eb1292016-03-05 08:49:45 -080014135static int init_asic_data(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014136{
14137 unsigned long flags;
14138 struct hfi1_devdata *tmp, *peer = NULL;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014139 struct hfi1_asic_data *asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014140 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014141
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014142 /* pre-allocate the asic structure in case we are the first device */
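	/* GFP_KERNEL can sleep, so allocate before taking hfi1_devs_lock below */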
14143 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14144 if (!asic_data)
14145 return -ENOMEM;
14146
Mike Marciniszyn77241052015-07-30 15:17:43 -040014147 spin_lock_irqsave(&hfi1_devs_lock, flags);
14148 /* Find our peer device */
14149 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14150 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14151 dd->unit != tmp->unit) {
14152 peer = tmp;
14153 break;
14154 }
14155 }
14156
Dean Luick78eb1292016-03-05 08:49:45 -080014157 if (peer) {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014158 /* use already allocated structure */
Dean Luick78eb1292016-03-05 08:49:45 -080014159 dd->asic_data = peer->asic_data;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014160 kfree(asic_data);
Dean Luick78eb1292016-03-05 08:49:45 -080014161 } else {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014162 dd->asic_data = asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014163 mutex_init(&dd->asic_data->asic_resource_mutex);
14164 }
14165 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014166 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
Dean Luick78eb1292016-03-05 08:49:45 -080014167 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014168}
14169
Dean Luick5d9157a2015-11-16 21:59:34 -050014170/*
14171 * Set dd->boardname. Use a generic name if a name is not returned from
14172 * EFI variable space.
14173 *
14174 * Return 0 on success, -ENOMEM if space could not be allocated.
14175 */
14176static int obtain_boardname(struct hfi1_devdata *dd)
14177{
14178 /* generic board description */
14179 const char generic[] =
14180 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14181 unsigned long size;
14182 int ret;
14183
14184 ret = read_hfi1_efi_var(dd, "description", &size,
14185 (void **)&dd->boardname);
14186 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014187 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014188 /* use generic description */
14189 dd->boardname = kstrdup(generic, GFP_KERNEL);
14190 if (!dd->boardname)
14191 return -ENOMEM;
14192 }
14193 return 0;
14194}
14195
Kaike Wan24487dd2016-02-26 13:33:23 -080014196/*
14197 * Check the interrupt registers to make sure that they are mapped correctly.
14198 * It is intended to help the user identify any mismapping by the VMM when
14199 * the driver is running in a VM. This function should only be called before
14200 * interrupts are set up properly.
14201 *
14202 * Return 0 on success, -EINVAL on failure.
14203 */
14204static int check_int_registers(struct hfi1_devdata *dd)
14205{
14206 u64 reg;
14207 u64 all_bits = ~(u64)0;
14208 u64 mask;
14209
14210 /* Clear CceIntMask[0] to avoid raising any interrupts */
14211 mask = read_csr(dd, CCE_INT_MASK);
14212 write_csr(dd, CCE_INT_MASK, 0ull);
14213 reg = read_csr(dd, CCE_INT_MASK);
14214 if (reg)
14215 goto err_exit;
14216
14217 /* Clear all interrupt status bits */
14218 write_csr(dd, CCE_INT_CLEAR, all_bits);
14219 reg = read_csr(dd, CCE_INT_STATUS);
14220 if (reg)
14221 goto err_exit;
14222
14223 /* Set all interrupt status bits */
14224 write_csr(dd, CCE_INT_FORCE, all_bits);
14225 reg = read_csr(dd, CCE_INT_STATUS);
14226 if (reg != all_bits)
14227 goto err_exit;
14228
14229 /* Restore the interrupt mask */
14230 write_csr(dd, CCE_INT_CLEAR, all_bits);
14231 write_csr(dd, CCE_INT_MASK, mask);
14232
14233 return 0;
14234err_exit:
14235 write_csr(dd, CCE_INT_MASK, mask);
14236 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14237 return -EINVAL;
14238}
14239
Mike Marciniszyn77241052015-07-30 15:17:43 -040014240/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014241 * hfi1_init_dd() - allocate and initialize the device structure for the hfi
Mike Marciniszyn77241052015-07-30 15:17:43 -040014242 * @pdev: the pci_dev for hfi1_ib device
14243 * @ent: pci_device_id struct for this dev
14244 *
14245 * Also allocates, initializes, and returns the devdata struct for this
14246 * device instance
14247 *
14248 * This is global, and is called directly at init to set up the
14249 * chip-specific function pointers for later use.
14250 */
14251struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14252 const struct pci_device_id *ent)
14253{
14254 struct hfi1_devdata *dd;
14255 struct hfi1_pportdata *ppd;
14256 u64 reg;
14257 int i, ret;
14258 static const char * const inames[] = { /* implementation names */
14259 "RTL silicon",
14260 "RTL VCS simulation",
14261 "RTL FPGA emulation",
14262 "Functional simulator"
14263 };
Kaike Wan24487dd2016-02-26 13:33:23 -080014264 struct pci_dev *parent = pdev->bus->self;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014265
Jubin John17fb4f22016-02-14 20:21:52 -080014266 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14267 sizeof(struct hfi1_pportdata));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014268 if (IS_ERR(dd))
14269 goto bail;
14270 ppd = dd->pport;
14271 for (i = 0; i < dd->num_pports; i++, ppd++) {
14272 int vl;
14273 /* init common fields */
14274 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14275 /* DC supports 4 link widths */
14276 ppd->link_width_supported =
14277 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14278 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14279 ppd->link_width_downgrade_supported =
14280 ppd->link_width_supported;
14281 /* start out enabling only 4X */
14282 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14283 ppd->link_width_downgrade_enabled =
14284 ppd->link_width_downgrade_supported;
14285 /* link width active is 0 when link is down */
14286 /* link width downgrade active is 0 when link is down */
14287
Jubin Johnd0d236e2016-02-14 20:20:15 -080014288 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14289 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014290 hfi1_early_err(&pdev->dev,
14291 "Invalid num_vls %u, using %u VLs\n",
14292 num_vls, HFI1_MAX_VLS_SUPPORTED);
14293 num_vls = HFI1_MAX_VLS_SUPPORTED;
14294 }
14295 ppd->vls_supported = num_vls;
14296 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014297 ppd->actual_vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014298 /* Set the default MTU. */
14299 for (vl = 0; vl < num_vls; vl++)
14300 dd->vld[vl].mtu = hfi1_max_mtu;
14301 dd->vld[15].mtu = MAX_MAD_PACKET;
14302 /*
14303 * Set the initial values to reasonable default, will be set
14304 * for real when link is up.
14305 */
14306 ppd->lstate = IB_PORT_DOWN;
14307 ppd->overrun_threshold = 0x4;
14308 ppd->phy_error_threshold = 0xf;
14309 ppd->port_crc_mode_enabled = link_crc_mask;
14310 /* initialize supported LTP CRC mode */
14311 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14312 /* initialize enabled LTP CRC mode */
14313 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14314 /* start in offline */
14315 ppd->host_link_state = HLS_DN_OFFLINE;
14316 init_vl_arb_caches(ppd);
Dean Luickf45c8dc2016-02-03 14:35:31 -080014317 ppd->last_pstate = 0xff; /* invalid value */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014318 }
14319
14320 dd->link_default = HLS_DN_POLL;
14321
14322 /*
14323 * Do remaining PCIe setup and save PCIe values in dd.
14324 * Any error printing is already done by the init code.
14325 * On return, we have the chip mapped.
14326 */
14327 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14328 if (ret < 0)
14329 goto bail_free;
14330
14331 /* verify that reads actually work, save revision for reset check */
14332 dd->revision = read_csr(dd, CCE_REVISION);
14333 if (dd->revision == ~(u64)0) {
14334 dd_dev_err(dd, "cannot read chip CSRs\n");
14335 ret = -EINVAL;
14336 goto bail_cleanup;
14337 }
14338 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14339 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14340 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14341 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14342
Jubin John4d114fd2016-02-14 20:21:43 -080014343 /*
Kaike Wan24487dd2016-02-26 13:33:23 -080014344 * Check interrupt registers mapping if the driver has no access to
14345 * the upstream component. In this case, it is likely that the driver
14346 * is running in a VM.
14347 */
14348 if (!parent) {
14349 ret = check_int_registers(dd);
14350 if (ret)
14351 goto bail_cleanup;
14352 }
14353
14354 /*
Jubin John4d114fd2016-02-14 20:21:43 -080014355 * obtain the hardware ID - NOT related to unit, which is a
14356 * software enumeration
14357 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014358 reg = read_csr(dd, CCE_REVISION2);
14359 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14360 & CCE_REVISION2_HFI_ID_MASK;
14361 /* the variable size will remove unwanted bits */
14362 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14363 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14364 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080014365 dd->icode < ARRAY_SIZE(inames) ?
14366 inames[dd->icode] : "unknown", (int)dd->irev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014367
14368 /* speeds the hardware can support */
14369 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14370 /* speeds allowed to run at */
14371 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14372 /* give a reasonable active value, will be set on link up */
14373 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14374
14375 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14376 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14377 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14378 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14379 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14380 /* fix up link widths for emulation _p */
14381 ppd = dd->pport;
14382 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14383 ppd->link_width_supported =
14384 ppd->link_width_enabled =
14385 ppd->link_width_downgrade_supported =
14386 ppd->link_width_downgrade_enabled =
14387 OPA_LINK_WIDTH_1X;
14388 }
14389	/* ensure num_vls isn't larger than number of sdma engines */
14390 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14391 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014392 num_vls, dd->chip_sdma_engines);
14393 num_vls = dd->chip_sdma_engines;
14394 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014395 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014396 }
14397
14398 /*
14399 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14400 * Limit the max if larger than the field holds. If timeout is
14401 * non-zero, then the calculated field will be at least 1.
14402 *
14403 * Must be after icode is set up - the cclock rate depends
14404 * on knowing the hardware being used.
14405 */
14406 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14407 if (dd->rcv_intr_timeout_csr >
14408 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14409 dd->rcv_intr_timeout_csr =
14410 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14411 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14412 dd->rcv_intr_timeout_csr = 1;
14413
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014414 /* needs to be done before we look for the peer device */
14415 read_guid(dd);
14416
Dean Luick78eb1292016-03-05 08:49:45 -080014417 /* set up shared ASIC data with peer device */
14418 ret = init_asic_data(dd);
14419 if (ret)
14420 goto bail_cleanup;
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014421
Mike Marciniszyn77241052015-07-30 15:17:43 -040014422 /* obtain chip sizes, reset chip CSRs */
14423 init_chip(dd);
14424
14425 /* read in the PCIe link speed information */
14426 ret = pcie_speeds(dd);
14427 if (ret)
14428 goto bail_cleanup;
14429
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014430 /* Needs to be called before hfi1_firmware_init */
14431 get_platform_config(dd);
14432
Mike Marciniszyn77241052015-07-30 15:17:43 -040014433 /* read in firmware */
14434 ret = hfi1_firmware_init(dd);
14435 if (ret)
14436 goto bail_cleanup;
14437
14438 /*
14439 * In general, the PCIe Gen3 transition must occur after the
14440 * chip has been idled (so it won't initiate any PCIe transactions
14441 * e.g. an interrupt) and before the driver changes any registers
14442 * (the transition will reset the registers).
14443 *
14444 * In particular, place this call after:
14445 * - init_chip() - the chip will not initiate any PCIe transactions
14446 * - pcie_speeds() - reads the current link speed
14447 * - hfi1_firmware_init() - the needed firmware is ready to be
14448 * downloaded
14449 */
14450 ret = do_pcie_gen3_transition(dd);
14451 if (ret)
14452 goto bail_cleanup;
14453
14454 /* start setting dd values and adjusting CSRs */
14455 init_early_variables(dd);
14456
14457 parse_platform_config(dd);
14458
Dean Luick5d9157a2015-11-16 21:59:34 -050014459 ret = obtain_boardname(dd);
14460 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014461 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014462
14463 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014464 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014465 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014466 (u32)dd->majrev,
14467 (u32)dd->minrev,
14468 (dd->revision >> CCE_REVISION_SW_SHIFT)
14469 & CCE_REVISION_SW_MASK);
14470
Jubin John0852d242016-04-12 11:30:08 -070014471 /*
14472 * The real cpu mask is part of the affinity struct but has to be
14473 * initialized earlier than the rest of the affinity struct because it
14474 * is needed to calculate the number of user contexts in
14475 * set_up_context_variables(). However, hfi1_dev_affinity_init(),
14476 * which initializes the rest of the affinity struct members,
14477 * depends on set_up_context_variables() for the number of kernel
14478 * contexts, so it cannot be called before set_up_context_variables().
14479 */
14480 ret = init_real_cpu_mask(dd);
14481 if (ret)
14482 goto bail_cleanup;
14483
Mike Marciniszyn77241052015-07-30 15:17:43 -040014484 ret = set_up_context_variables(dd);
14485 if (ret)
14486 goto bail_cleanup;
14487
14488 /* set initial RXE CSRs */
14489 init_rxe(dd);
14490 /* set initial TXE CSRs */
14491 init_txe(dd);
14492 /* set initial non-RXE, non-TXE CSRs */
14493 init_other(dd);
14494 /* set up KDETH QP prefix in both RX and TX CSRs */
14495 init_kdeth_qp(dd);
14496
Jubin John0852d242016-04-12 11:30:08 -070014497 hfi1_dev_affinity_init(dd);
Mitko Haralanov957558c2016-02-03 14:33:40 -080014498
Mike Marciniszyn77241052015-07-30 15:17:43 -040014499 /* send contexts must be set up before receive contexts */
14500 ret = init_send_contexts(dd);
14501 if (ret)
14502 goto bail_cleanup;
14503
14504 ret = hfi1_create_ctxts(dd);
14505 if (ret)
14506 goto bail_cleanup;
14507
14508 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14509 /*
14510 * rcd[0] is guaranteed to be valid by this point. Also, all
14511	 * contexts are using the same value, as per the module parameter.
14512 */
14513 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
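	/*
	 * Precedence note: sizeof(u64) / sizeof(u32) == 2, so rhf_offset is
	 * rcvhdrqentsize minus two 32-bit words (the size of one 64-bit RHF),
	 * not (rcvhdrqentsize - 8) / 4.
	 */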
14514
14515 ret = init_pervl_scs(dd);
14516 if (ret)
14517 goto bail_cleanup;
14518
14519 /* sdma init */
14520 for (i = 0; i < dd->num_pports; ++i) {
14521 ret = sdma_init(dd, i);
14522 if (ret)
14523 goto bail_cleanup;
14524 }
14525
14526 /* use contexts created by hfi1_create_ctxts */
14527 ret = set_up_interrupts(dd);
14528 if (ret)
14529 goto bail_cleanup;
14530
14531 /* set up LCB access - must be after set_up_interrupts() */
14532 init_lcb_access(dd);
14533
14534 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14535 dd->base_guid & 0xFFFFFF);
14536
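	/* the OUI is the top three bytes of the GUID */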
14537 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14538 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14539 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14540
14541 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14542 if (ret)
14543 goto bail_clear_intr;
14544 check_fabric_firmware_versions(dd);
14545
14546 thermal_init(dd);
14547
14548 ret = init_cntrs(dd);
14549 if (ret)
14550 goto bail_clear_intr;
14551
14552 ret = init_rcverr(dd);
14553 if (ret)
14554 goto bail_free_cntrs;
14555
14556 ret = eprom_init(dd);
14557 if (ret)
14558 goto bail_free_rcverr;
14559
14560 goto bail;
14561
14562bail_free_rcverr:
14563 free_rcverr(dd);
14564bail_free_cntrs:
14565 free_cntrs(dd);
14566bail_clear_intr:
14567 clean_up_interrupts(dd);
14568bail_cleanup:
14569 hfi1_pcie_ddcleanup(dd);
14570bail_free:
14571 hfi1_free_devdata(dd);
14572 dd = ERR_PTR(ret);
14573bail:
14574 return dd;
14575}
14576
14577static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14578 u32 dw_len)
14579{
14580 u32 delta_cycles;
14581 u32 current_egress_rate = ppd->current_egress_rate;
14582 /* rates here are in units of 10^6 bits/sec */
14583
14584 if (desired_egress_rate == -1)
14585 return 0; /* shouldn't happen */
14586
14587 if (desired_egress_rate >= current_egress_rate)
14588		return 0; /* we can only slow down, never speed up */
14589
14590 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14591 egress_cycles(dw_len * 4, current_egress_rate);
14592
14593 return (u16)delta_cycles;
14594}
14595
Mike Marciniszyn77241052015-07-30 15:17:43 -040014596/**
14597 * create_pbc - build a pbc for transmission
14598 * @flags: special case flags or-ed in built pbc
14599 * @srate_mbs: static rate, in Mbit/s
14600 * @vl: vl
14601 * @dw_len: dword length (header words + data words + pbc words)
14602 *
14603 * Create a PBC with the given flags, rate, VL, and length.
14604 *
14605 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14606 * for verbs, which does not use this PSM feature. The lone other caller
14607 * is for the diagnostic interface which calls this if the user does not
14608 * supply their own PBC.
14609 */
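/*
 * A minimal, hypothetical verbs-style use with no flags and no static
 * rate limit would be
 *
 *	pbc = create_pbc(ppd, 0, 0, vl, dw_len);
 *
 * which leaves only the VL, length, and "no HCRC" fields set.
 */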
14610u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14611 u32 dw_len)
14612{
14613 u64 pbc, delay = 0;
14614
14615 if (unlikely(srate_mbs))
14616 delay = delay_cycles(ppd, srate_mbs, dw_len);
14617
14618 pbc = flags
14619 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14620 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14621 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14622 | (dw_len & PBC_LENGTH_DWS_MASK)
14623 << PBC_LENGTH_DWS_SHIFT;
14624
14625 return pbc;
14626}
14627
14628#define SBUS_THERMAL 0x4f
14629#define SBUS_THERM_MONITOR_MODE 0x1
14630
14631#define THERM_FAILURE(dev, ret, reason) \
14632 dd_dev_err((dd), \
14633 "Thermal sensor initialization failed: %s (%d)\n", \
14634 (reason), (ret))
14635
14636/*
Jakub Pawlakcde10af2016-05-12 10:23:35 -070014637 * Initialize the thermal sensor.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014638 *
14639 * After initialization, enable polling of thermal sensor through
14640 * SBus interface. In order for this to work, the SBus Master
14641 * firmware has to be loaded due to the fact that the HW polling
14642 * logic uses SBus interrupts, which are not supported with
14643 * default firmware. Otherwise, no data will be returned through
14644 * the ASIC_STS_THERM CSR.
14645 */
14646static int thermal_init(struct hfi1_devdata *dd)
14647{
14648 int ret = 0;
14649
14650 if (dd->icode != ICODE_RTL_SILICON ||
Dean Luicka4536982016-03-05 08:50:11 -080014651 check_chip_resource(dd, CR_THERM_INIT, NULL))
Mike Marciniszyn77241052015-07-30 15:17:43 -040014652 return ret;
14653
Dean Luick576531f2016-03-05 08:50:01 -080014654 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14655 if (ret) {
14656 THERM_FAILURE(dd, ret, "Acquire SBus");
14657 return ret;
14658 }
14659
Mike Marciniszyn77241052015-07-30 15:17:43 -040014660 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014661 /* Disable polling of thermal readings */
14662 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14663 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014664 /* Thermal Sensor Initialization */
14665 /* Step 1: Reset the Thermal SBus Receiver */
14666 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14667 RESET_SBUS_RECEIVER, 0);
14668 if (ret) {
14669 THERM_FAILURE(dd, ret, "Bus Reset");
14670 goto done;
14671 }
14672 /* Step 2: Set Reset bit in Thermal block */
14673 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14674 WRITE_SBUS_RECEIVER, 0x1);
14675 if (ret) {
14676 THERM_FAILURE(dd, ret, "Therm Block Reset");
14677 goto done;
14678 }
14679 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14680 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14681 WRITE_SBUS_RECEIVER, 0x32);
14682 if (ret) {
14683 THERM_FAILURE(dd, ret, "Write Clock Div");
14684 goto done;
14685 }
14686 /* Step 4: Select temperature mode */
14687 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14688 WRITE_SBUS_RECEIVER,
14689 SBUS_THERM_MONITOR_MODE);
14690 if (ret) {
14691 THERM_FAILURE(dd, ret, "Write Mode Sel");
14692 goto done;
14693 }
14694 /* Step 5: De-assert block reset and start conversion */
14695 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14696 WRITE_SBUS_RECEIVER, 0x2);
14697 if (ret) {
14698 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14699 goto done;
14700 }
14701 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14702 msleep(22);
14703
14704 /* Enable polling of thermal readings */
14705 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
Dean Luicka4536982016-03-05 08:50:11 -080014706
14707 /* Set initialized flag */
14708 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14709 if (ret)
14710 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14711
Mike Marciniszyn77241052015-07-30 15:17:43 -040014712done:
Dean Luick576531f2016-03-05 08:50:01 -080014713 release_chip_resource(dd, CR_SBUS);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014714 return ret;
14715}
14716
14717static void handle_temp_err(struct hfi1_devdata *dd)
14718{
14719 struct hfi1_pportdata *ppd = &dd->pport[0];
14720 /*
14721 * Thermal Critical Interrupt
14722 * Put the device into forced freeze mode, take link down to
14723 * offline, and put DC into reset.
14724 */
14725 dd_dev_emerg(dd,
14726 "Critical temperature reached! Forcing device into freeze mode!\n");
14727 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080014728 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014729 /*
14730 * Shut DC down as much and as quickly as possible.
14731 *
14732 * Step 1: Take the link down to OFFLINE. This will cause the
14733 * 8051 to put the Serdes in reset. However, we don't want to
14734 * go through the entire link state machine since we want to
14735 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14736 * but rather an attempt to save the chip.
14737 * Code below is almost the same as quiet_serdes() but avoids
14738 * all the extra work and the sleeps.
14739 */
14740 ppd->driver_link_ready = 0;
14741 ppd->link_enabled = 0;
Harish Chegondibf640092016-03-05 08:49:29 -080014742 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14743 PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014744 /*
14745 * Step 2: Shutdown LCB and 8051
14746 * After shutdown, do not restore DC_CFG_RESET value.
14747 */
14748 dc_shutdown(dd);
14749}