/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
	u16 unused0;
	u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
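/*
 * Illustration only: FLAG_ENTRY0("CceCsrParityErr", mask) expands to the
 * initializer { mask, "CceCsrParityErr", 0 }, so FLAG_ENTRY0() entries
 * carry no consequence bits, while FLAG_ENTRY() supplies the SEC_*
 * consequences (defined below) in 'extra'.
 */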

/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields */

/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
/* QPN[7..1] */
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
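/*
 * A sketch of the expansion: SC2VL_VAL(0, 0, 0, ...) token-pastes the
 * 'num' and 'scN' arguments into SEND_SC2VLT0_SC0_SHIFT etc. and ORs the
 * eight shifted values into a single 64-bit image for one SendSC2VLT CSR.
 */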

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)
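/*
 * Same idea as SC2VL_VAL above, but for the DC's SC-to-VL table: sixteen
 * entry values are packed into one DCC_CFG_SC_VL_TABLE_* register image,
 * with 'range' (presumably selecting which register half, and therefore
 * which sixteen entries) completing the token-pasted CSR field names.
 */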

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
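/*
 * MES(CSR_PARITY), for example, pastes to
 * MISC_ERR_STATUS_MISC_CSR_PARITY_ERR_SMASK; the other shorthand helpers
 * below (SEES, SEEI, SES, RXES, DCCE, LCBE, D8E) follow the same pattern
 * against their respective register name spaces.
 */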
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
		SEC_WRITE_DROPPED,
		SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
		0,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
		0,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
		0,
		SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
		0,
		SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
		SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
		SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};
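/*
 * Note: entries 18 (TooLongIBPacketErr), 7 (VLMappingErr), and 1 (VLErr)
 * above are the same SendEgressErrInfo bits collected in
 * PORT_DISCARD_EGRESS_ERRS, i.e. the egress errors that are counted as
 * PortXmitDiscard.
 */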

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT)
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC SMA message", 0x0002),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);

/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt
 * registers. Second tier interrupt registers have a single bit
 * representing them in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
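/*
 * For instance, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 * { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" };
 * the DC_EE1/DC_EE2 variants exist only because the DC blocks name their
 * flag/clear/enable CSRs inconsistently.
 */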

/*
 * Table of the "misc" grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7

/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};

struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;

	/*
	 * csr to read for name (if applicable)
	 */
	u64 csr;

	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;

	/*
	 * flags
	 */
	u8 flags;

	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}
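/*
 * CNTR_ELEM("foo", csr, 0, flags, fn) is simply a positional initializer
 * for struct cntr_entry above; the RXE/TXE/CCE wrappers below fill in the
 * CSR arithmetic (counter index * 8 added to the relevant counter-array
 * base address).
 */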

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)
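/*
 * Each receive context's header overflow counter sits 0x100 bytes past
 * the previous one, so OVR_ELM(ctx) points its entry at
 * RCV_HDR_OVFL_CNT + ctx * 0x100 and labels it "RcvHdrOvr<ctx>".
 */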
1201
1202/* 32bit TXE */
1203#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1204CNTR_ELEM(#name, \
1205 (counter * 8 + SEND_COUNTER_ARRAY32), \
1206 0, flags | CNTR_32BIT, \
1207 port_access_u32_csr)
1208
1209/* 64bit TXE */
1210#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1211CNTR_ELEM(#name, \
1212 (counter * 8 + SEND_COUNTER_ARRAY64), \
1213 0, flags, \
1214 port_access_u64_csr)
1215
1216# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1217CNTR_ELEM(#name,\
1218 counter * 8 + SEND_COUNTER_ARRAY64, \
1219 0, \
1220 flags, \
1221 dev_access_u64_csr)
1222
1223/* CCE */
1224#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1225CNTR_ELEM(#name, \
1226 (counter * 8 + CCE_COUNTER_ARRAY32), \
1227 0, flags | CNTR_32BIT, \
1228 dev_access_u32_csr)
1229
1230#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1231CNTR_ELEM(#name, \
1232 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1233 0, flags | CNTR_32BIT, \
1234 dev_access_u32_csr)
1235
1236/* DC */
1237#define DC_PERF_CNTR(name, counter, flags) \
1238CNTR_ELEM(#name, \
1239 counter, \
1240 0, \
1241 flags, \
1242 dev_access_u64_csr)
1243
1244#define DC_PERF_CNTR_LCB(name, counter, flags) \
1245CNTR_ELEM(#name, \
1246 counter, \
1247 0, \
1248 flags, \
1249 dc_access_lcb_cntr)
1250
1251/* ibp counters */
1252#define SW_IBP_CNTR(name, cntr) \
1253CNTR_ELEM(#name, \
1254 0, \
1255 0, \
1256 CNTR_SYNTH, \
1257 access_ibp_##cntr)
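
/*
 * Illustrative sketch only (not part of the driver): all of the macros
 * above expand to struct cntr_entry initializers.  A hypothetical
 * device-level 32-bit RXE counter at array index 5,
 *
 *	RXE32_DEV_CNTR_ELEM(RxExample, 5, CNTR_NORMAL)
 *
 * would expand to roughly
 *
 *	{ "RxExample", (5 * 8 + RCV_COUNTER_ARRAY32), 0,
 *	  CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
 *
 * i.e. each counter occupies an 8-byte CSR slot, so the index is scaled
 * by 8 and offset from the array base.  "RxExample" is a made-up name
 * used only for this comment.
 */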

u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq((void __iomem *)dd->kregbase + offset);
	return -1;
}

void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT)
		writeq(value, (void __iomem *)dd->kregbase + offset);
}

void __iomem *get_csr_addr(
	struct hfi1_devdata *dd,
	u32 offset)
{
	return (void __iomem *)dd->kregbase + offset;
}
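
/*
 * Illustrative sketch only: read_csr()/write_csr() are the building
 * blocks for read-modify-write sequences on chip CSRs, e.g. setting a
 * bit in a register:
 *
 *	u64 reg = read_csr(dd, SOME_CSR);
 *
 *	reg |= SOME_ENABLE_SMASK;
 *	write_csr(dd, SOME_CSR, reg);
 *
 * SOME_CSR and SOME_ENABLE_SMASK are placeholder names.  Note that
 * read_csr() returns -1 (all ones) when the device is not present, so
 * callers that care must check HFI1_PRESENT themselves.
 */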

static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
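
/*
 * Illustrative sketch only: every rw_cntr accessor funnels into a
 * helper like the one above, so one entry point serves both directions
 * of a counter access:
 *
 *	u64 cur = read_write_csr(dd, csr, CNTR_MODE_R, 0);	// read
 *
 *	read_write_csr(dd, csr, CNTR_MODE_W, 0);		// zero it
 *
 * Any other mode is rejected with a device error and a return of 0, so
 * callers cannot distinguish "invalid mode" from a counter that
 * legitimately reads 0.
 */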

/* Dev Access */
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}
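
/*
 * Illustrative sketch only: for CNTR_SDMA entries the "vl" argument is
 * reused as an SDMA engine index, and each engine's copy of the CSR
 * sits 0x100 bytes after the previous one.  Reading engine 3's counter
 * therefore resolves to entry->csr + 0x300:
 *
 *	val = dev_access_u32_csr(entry, dd, 3, CNTR_MODE_R, 0);
 *
 * Non-SDMA entries must be called with vl == CNTR_INVALID_VL; any
 * mismatch simply yields 0.
 */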

static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}

static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}

static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}
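
/*
 * Illustrative sketch only: unlike plain CSR counters, LCB counters go
 * through read_lcb_csr()/write_lcb_csr(), which can fail when the LCB
 * is not currently accessible to the host.  A failed access is logged
 * and reported as 0:
 *
 *	val = dc_access_lcb_cntr(entry, dd, CNTR_INVALID_VL,
 *				 CNTR_MODE_R, 0);
 *
 * so a zero here may mean "counter is zero" or "LCB unavailable".
 */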

/* Port Access */
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}
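
/*
 * Illustrative sketch only: CNTR_VL entries are banks of 64-bit CSRs,
 * one per virtual lane, laid out 8 bytes apart.  Reading VL2 of such a
 * port counter resolves to entry->csr + 16:
 *
 *	val = port_access_u64_csr(entry, ppd, 2, CNTR_MODE_R, 0);
 *
 * The per-device variant dev_access_u64_csr() above uses the same
 * 8-byte stride; only the context (ppd vs. dd) differs.
 */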

/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

	return ret;
}

static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}

static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}
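
/*
 * Illustrative sketch only: xmit discards are the one software counter
 * here that answers for both the aggregate and the per-VL view, keyed
 * off the vl argument:
 *
 *	total = access_sw_xmit_discards(entry, ppd, CNTR_INVALID_VL,
 *					CNTR_MODE_R, 0);
 *	vl0 = access_sw_xmit_discards(entry, ppd, 0, CNTR_MODE_R, 0);
 *
 * Out-of-range VLs are redirected to a local zero variable, so reads
 * return 0 and writes land harmlessly on the stack copy.
 */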

static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
				      void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}

u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
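
/*
 * Illustrative sketch only: per-CPU counters are updated locklessly on
 * each CPU and only summed at read time, so a counter incremented as
 *
 *	this_cpu_inc(*dd->int_counter);
 *
 * is totalled over every possible CPU by get_all_cpu_total().  The sum
 * is a snapshot, not a fence: increments racing with the walk may or
 * may not be included.
 */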

static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
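
/*
 * Illustrative sketch only: per-CPU counters are "zeroed" without ever
 * touching the per-CPU data.  Writing 0 records the current total in
 * *z_val, and subsequent reads subtract that baseline:
 *
 *	read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *		       CNTR_INVALID_VL, CNTR_MODE_W, 0);
 *
 * After this, a read returns only interrupts taken since the zeroing.
 * Writing any non-zero value is rejected with a device error.
 */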

static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}

/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}
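
/*
 * Note on the layout above (and the CceErrStatus, RcvErrStatus,
 * SendPioErrStatus, SendDmaErrStatus and SendEgressErrStatus families
 * that follow): each accessor returns one slot of a per-device array,
 * indexed by the bit position of the matching error in the status
 * register, with the accessors listed from the highest bit down to
 * bit 0.  The arrays themselves are bumped by the error-interrupt
 * handlers; these functions only ever read.
 */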

/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}

/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl, int mode,
						u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[28];
}

static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[27];
}

static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[26];
}

static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[25];
}

static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[24];
}

static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[23];
}

static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[22];
}

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[63];
}

static u64 access_tx_read_sdma_memory_csr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[62];
}

static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[61];
}

static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[60];
}

static u64 access_tx_read_sdma_memory_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[59];
}

static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[58];
}

static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[57];
}

static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[56];
}

static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[55];
}

static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[54];
}

static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[53];
}

static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[52];
}

static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[51];
}

static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[50];
}

static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[49];
}

static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[48];
}

static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[47];
}

static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3229 void *context, int vl, int mode,
3230 u64 data)
3231{
3232 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3233
3234 return dd->send_egress_err_status_cnt[46];
3235}
3236
3237static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3238 void *context, int vl, int mode,
3239 u64 data)
3240{
3241 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3242
3243 return dd->send_egress_err_status_cnt[45];
3244}
3245
3246static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3247 void *context, int vl,
3248 int mode, u64 data)
3249{
3250 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3251
3252 return dd->send_egress_err_status_cnt[44];
3253}
3254
3255static u64 access_tx_read_sdma_memory_unc_err_cnt(
3256 const struct cntr_entry *entry,
3257 void *context, int vl, int mode, u64 data)
3258{
3259 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3260
3261 return dd->send_egress_err_status_cnt[43];
3262}
3263
3264static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3265 void *context, int vl, int mode,
3266 u64 data)
3267{
3268 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3269
3270 return dd->send_egress_err_status_cnt[42];
3271}
3272
3273static u64 access_tx_credit_return_parity_err_cnt(
3274 const struct cntr_entry *entry,
3275 void *context, int vl, int mode, u64 data)
3276{
3277 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3278
3279 return dd->send_egress_err_status_cnt[41];
3280}
3281
3282static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3283 const struct cntr_entry *entry,
3284 void *context, int vl, int mode, u64 data)
3285{
3286 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3287
3288 return dd->send_egress_err_status_cnt[40];
3289}
3290
3291static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3292 const struct cntr_entry *entry,
3293 void *context, int vl, int mode, u64 data)
3294{
3295 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3296
3297 return dd->send_egress_err_status_cnt[39];
3298}
3299
3300static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3301 const struct cntr_entry *entry,
3302 void *context, int vl, int mode, u64 data)
3303{
3304 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3305
3306 return dd->send_egress_err_status_cnt[38];
3307}
3308
3309static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3310 const struct cntr_entry *entry,
3311 void *context, int vl, int mode, u64 data)
3312{
3313 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3314
3315 return dd->send_egress_err_status_cnt[37];
3316}
3317
3318static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3319 const struct cntr_entry *entry,
3320 void *context, int vl, int mode, u64 data)
3321{
3322 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3323
3324 return dd->send_egress_err_status_cnt[36];
3325}
3326
3327static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3328 const struct cntr_entry *entry,
3329 void *context, int vl, int mode, u64 data)
3330{
3331 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3332
3333 return dd->send_egress_err_status_cnt[35];
3334}
3335
3336static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3337 const struct cntr_entry *entry,
3338 void *context, int vl, int mode, u64 data)
3339{
3340 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3341
3342 return dd->send_egress_err_status_cnt[34];
3343}
3344
3345static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3346 const struct cntr_entry *entry,
3347 void *context, int vl, int mode, u64 data)
3348{
3349 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3350
3351 return dd->send_egress_err_status_cnt[33];
3352}
3353
3354static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3355 const struct cntr_entry *entry,
3356 void *context, int vl, int mode, u64 data)
3357{
3358 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3359
3360 return dd->send_egress_err_status_cnt[32];
3361}
3362
3363static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3364 const struct cntr_entry *entry,
3365 void *context, int vl, int mode, u64 data)
3366{
3367 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3368
3369 return dd->send_egress_err_status_cnt[31];
3370}
3371
3372static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3373 const struct cntr_entry *entry,
3374 void *context, int vl, int mode, u64 data)
3375{
3376 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3377
3378 return dd->send_egress_err_status_cnt[30];
3379}
3380
3381static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3382 const struct cntr_entry *entry,
3383 void *context, int vl, int mode, u64 data)
3384{
3385 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3386
3387 return dd->send_egress_err_status_cnt[29];
3388}
3389
3390static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3391 const struct cntr_entry *entry,
3392 void *context, int vl, int mode, u64 data)
3393{
3394 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3395
3396 return dd->send_egress_err_status_cnt[28];
3397}
3398
3399static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3400 const struct cntr_entry *entry,
3401 void *context, int vl, int mode, u64 data)
3402{
3403 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3404
3405 return dd->send_egress_err_status_cnt[27];
3406}
3407
3408static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3409 const struct cntr_entry *entry,
3410 void *context, int vl, int mode, u64 data)
3411{
3412 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3413
3414 return dd->send_egress_err_status_cnt[26];
3415}
3416
3417static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3418 const struct cntr_entry *entry,
3419 void *context, int vl, int mode, u64 data)
3420{
3421 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3422
3423 return dd->send_egress_err_status_cnt[25];
3424}
3425
3426static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3427 const struct cntr_entry *entry,
3428 void *context, int vl, int mode, u64 data)
3429{
3430 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3431
3432 return dd->send_egress_err_status_cnt[24];
3433}
3434
3435static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3436 const struct cntr_entry *entry,
3437 void *context, int vl, int mode, u64 data)
3438{
3439 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3440
3441 return dd->send_egress_err_status_cnt[23];
3442}
3443
3444static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3445 const struct cntr_entry *entry,
3446 void *context, int vl, int mode, u64 data)
3447{
3448 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3449
3450 return dd->send_egress_err_status_cnt[22];
3451}
3452
3453static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3454 const struct cntr_entry *entry,
3455 void *context, int vl, int mode, u64 data)
3456{
3457 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3458
3459 return dd->send_egress_err_status_cnt[21];
3460}
3461
3462static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3463 const struct cntr_entry *entry,
3464 void *context, int vl, int mode, u64 data)
3465{
3466 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3467
3468 return dd->send_egress_err_status_cnt[20];
3469}
3470
3471static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3472 const struct cntr_entry *entry,
3473 void *context, int vl, int mode, u64 data)
3474{
3475 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3476
3477 return dd->send_egress_err_status_cnt[19];
3478}
3479
3480static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3481 const struct cntr_entry *entry,
3482 void *context, int vl, int mode, u64 data)
3483{
3484 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3485
3486 return dd->send_egress_err_status_cnt[18];
3487}
3488
3489static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3490 const struct cntr_entry *entry,
3491 void *context, int vl, int mode, u64 data)
3492{
3493 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3494
3495 return dd->send_egress_err_status_cnt[17];
3496}
3497
3498static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3499 const struct cntr_entry *entry,
3500 void *context, int vl, int mode, u64 data)
3501{
3502 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3503
3504 return dd->send_egress_err_status_cnt[16];
3505}
3506
3507static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3508 void *context, int vl, int mode,
3509 u64 data)
3510{
3511 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3512
3513 return dd->send_egress_err_status_cnt[15];
3514}
3515
3516static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3517 void *context, int vl,
3518 int mode, u64 data)
3519{
3520 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3521
3522 return dd->send_egress_err_status_cnt[14];
3523}
3524
3525static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3526 void *context, int vl, int mode,
3527 u64 data)
3528{
3529 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3530
3531 return dd->send_egress_err_status_cnt[13];
3532}
3533
3534static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3535 void *context, int vl, int mode,
3536 u64 data)
3537{
3538 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3539
3540 return dd->send_egress_err_status_cnt[12];
3541}
3542
3543static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3544 const struct cntr_entry *entry,
3545 void *context, int vl, int mode, u64 data)
3546{
3547 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3548
3549 return dd->send_egress_err_status_cnt[11];
3550}
3551
3552static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3553 void *context, int vl, int mode,
3554 u64 data)
3555{
3556 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3557
3558 return dd->send_egress_err_status_cnt[10];
3559}
3560
3561static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3562 void *context, int vl, int mode,
3563 u64 data)
3564{
3565 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3566
3567 return dd->send_egress_err_status_cnt[9];
3568}
3569
3570static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3571 const struct cntr_entry *entry,
3572 void *context, int vl, int mode, u64 data)
3573{
3574 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3575
3576 return dd->send_egress_err_status_cnt[8];
3577}
3578
3579static u64 access_tx_pio_launch_intf_parity_err_cnt(
3580 const struct cntr_entry *entry,
3581 void *context, int vl, int mode, u64 data)
3582{
3583 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3584
3585 return dd->send_egress_err_status_cnt[7];
3586}
3587
3588static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3589 void *context, int vl, int mode,
3590 u64 data)
3591{
3592 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3593
3594 return dd->send_egress_err_status_cnt[6];
3595}
3596
3597static u64 access_tx_incorrect_link_state_err_cnt(
3598 const struct cntr_entry *entry,
3599 void *context, int vl, int mode, u64 data)
3600{
3601 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3602
3603 return dd->send_egress_err_status_cnt[5];
3604}
3605
3606static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3607 void *context, int vl, int mode,
3608 u64 data)
3609{
3610 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3611
3612 return dd->send_egress_err_status_cnt[4];
3613}
3614
3615static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3616 const struct cntr_entry *entry,
3617 void *context, int vl, int mode, u64 data)
3618{
3619 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3620
3621 return dd->send_egress_err_status_cnt[3];
3622}
3623
3624static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3625 void *context, int vl, int mode,
3626 u64 data)
3627{
3628 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3629
3630 return dd->send_egress_err_status_cnt[2];
3631}
3632
3633static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3634 const struct cntr_entry *entry,
3635 void *context, int vl, int mode, u64 data)
3636{
3637 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3638
3639 return dd->send_egress_err_status_cnt[1];
3640}
3641
3642static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3643 const struct cntr_entry *entry,
3644 void *context, int vl, int mode, u64 data)
3645{
3646 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3647
3648 return dd->send_egress_err_status_cnt[0];
3649}
3650
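/*
 * Example (sketch only): SendEgressErrStatus is a 64-bit status, and
 * the accessors above map bit N straight to
 * send_egress_err_status_cnt[N] (the reserved bits 10, 9, 6 and 2 are
 * counted too, which keeps the indices aligned with the bit
 * positions).  A bounds-checked generic reader could look like this;
 * hfi1_example_egress_err_cnt() is an illustrative name, not a driver
 * interface.
 */
static inline u64 hfi1_example_egress_err_cnt(struct hfi1_devdata *dd,
					      unsigned int bit)
{
	/* one software counter per status bit, 0..63 */
	if (bit > 63)
		return 0;
	return dd->send_egress_err_status_cnt[bit];
}
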
3651/*
3652 * Software counters corresponding to each of the
3653 * error status bits within SendErrStatus
3654 */
3655static u64 access_send_csr_write_bad_addr_err_cnt(
3656 const struct cntr_entry *entry,
3657 void *context, int vl, int mode, u64 data)
3658{
3659 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3660
3661 return dd->send_err_status_cnt[2];
3662}
3663
3664static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3665 void *context, int vl,
3666 int mode, u64 data)
3667{
3668 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3669
3670 return dd->send_err_status_cnt[1];
3671}
3672
3673static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3674 void *context, int vl, int mode,
3675 u64 data)
3676{
3677 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3678
3679 return dd->send_err_status_cnt[0];
3680}
3681
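/*
 * Example (sketch): SendErrStatus only has three bits of interest
 * here, mirrored in send_err_status_cnt[2..0].  Summing them gives
 * the total number of Send CSR access errors seen so far;
 * hfi1_example_total_send_csr_errs() is a hypothetical helper.
 */
static inline u64 hfi1_example_total_send_csr_errs(struct hfi1_devdata *dd)
{
	return dd->send_err_status_cnt[0] +	/* CSR parity */
	       dd->send_err_status_cnt[1] +	/* CSR read bad addr */
	       dd->send_err_status_cnt[2];	/* CSR write bad addr */
}
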
3682/*
3683 * Software counters corresponding to each of the
3684 * error status bits within SendCtxtErrStatus
3685 */
3686static u64 access_pio_write_out_of_bounds_err_cnt(
3687 const struct cntr_entry *entry,
3688 void *context, int vl, int mode, u64 data)
3689{
3690 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3691
3692 return dd->sw_ctxt_err_status_cnt[4];
3693}
3694
3695static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3696 void *context, int vl, int mode,
3697 u64 data)
3698{
3699 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3700
3701 return dd->sw_ctxt_err_status_cnt[3];
3702}
3703
3704static u64 access_pio_write_crosses_boundary_err_cnt(
3705 const struct cntr_entry *entry,
3706 void *context, int vl, int mode, u64 data)
3707{
3708 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3709
3710 return dd->sw_ctxt_err_status_cnt[2];
3711}
3712
3713static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3714 void *context, int vl,
3715 int mode, u64 data)
3716{
3717 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3718
3719 return dd->sw_ctxt_err_status_cnt[1];
3720}
3721
3722static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3723 void *context, int vl, int mode,
3724 u64 data)
3725{
3726 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3727
3728 return dd->sw_ctxt_err_status_cnt[0];
3729}
3730
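/*
 * Example (sketch): these dd-level accessors ignore their entry, vl,
 * mode and data arguments and only dereference the context pointer,
 * so a direct read needs nothing but the devdata.  Normal use goes
 * through the counter tables; hfi1_example_pio_overflow_cnt() is
 * illustrative only.
 */
static inline u64 hfi1_example_pio_overflow_cnt(struct hfi1_devdata *dd)
{
	/* entry, vl, mode and data are all unused by this accessor */
	return access_pio_write_overflow_err_cnt(NULL, dd, CNTR_INVALID_VL,
						 0, 0);
}
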
3731/*
3732 * Software counters corresponding to each of the
3733 * error status bits within SendDmaEngErrStatus
3734 */
3735static u64 access_sdma_header_request_fifo_cor_err_cnt(
3736 const struct cntr_entry *entry,
3737 void *context, int vl, int mode, u64 data)
3738{
3739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740
3741 return dd->sw_send_dma_eng_err_status_cnt[23];
3742}
3743
3744static u64 access_sdma_header_storage_cor_err_cnt(
3745 const struct cntr_entry *entry,
3746 void *context, int vl, int mode, u64 data)
3747{
3748 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3749
3750 return dd->sw_send_dma_eng_err_status_cnt[22];
3751}
3752
3753static u64 access_sdma_packet_tracking_cor_err_cnt(
3754 const struct cntr_entry *entry,
3755 void *context, int vl, int mode, u64 data)
3756{
3757 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3758
3759 return dd->sw_send_dma_eng_err_status_cnt[21];
3760}
3761
3762static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3763 void *context, int vl, int mode,
3764 u64 data)
3765{
3766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3767
3768 return dd->sw_send_dma_eng_err_status_cnt[20];
3769}
3770
3771static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3772 void *context, int vl, int mode,
3773 u64 data)
3774{
3775 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3776
3777 return dd->sw_send_dma_eng_err_status_cnt[19];
3778}
3779
3780static u64 access_sdma_header_request_fifo_unc_err_cnt(
3781 const struct cntr_entry *entry,
3782 void *context, int vl, int mode, u64 data)
3783{
3784 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3785
3786 return dd->sw_send_dma_eng_err_status_cnt[18];
3787}
3788
3789static u64 access_sdma_header_storage_unc_err_cnt(
3790 const struct cntr_entry *entry,
3791 void *context, int vl, int mode, u64 data)
3792{
3793 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3794
3795 return dd->sw_send_dma_eng_err_status_cnt[17];
3796}
3797
3798static u64 access_sdma_packet_tracking_unc_err_cnt(
3799 const struct cntr_entry *entry,
3800 void *context, int vl, int mode, u64 data)
3801{
3802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3803
3804 return dd->sw_send_dma_eng_err_status_cnt[16];
3805}
3806
3807static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3808 void *context, int vl, int mode,
3809 u64 data)
3810{
3811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3812
3813 return dd->sw_send_dma_eng_err_status_cnt[15];
3814}
3815
3816static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3817 void *context, int vl, int mode,
3818 u64 data)
3819{
3820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3821
3822 return dd->sw_send_dma_eng_err_status_cnt[14];
3823}
3824
3825static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3826 void *context, int vl, int mode,
3827 u64 data)
3828{
3829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3830
3831 return dd->sw_send_dma_eng_err_status_cnt[13];
3832}
3833
3834static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3835 void *context, int vl, int mode,
3836 u64 data)
3837{
3838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3839
3840 return dd->sw_send_dma_eng_err_status_cnt[12];
3841}
3842
3843static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3844 void *context, int vl, int mode,
3845 u64 data)
3846{
3847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3848
3849 return dd->sw_send_dma_eng_err_status_cnt[11];
3850}
3851
3852static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3853 void *context, int vl, int mode,
3854 u64 data)
3855{
3856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3857
3858 return dd->sw_send_dma_eng_err_status_cnt[10];
3859}
3860
3861static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3862 void *context, int vl, int mode,
3863 u64 data)
3864{
3865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3866
3867 return dd->sw_send_dma_eng_err_status_cnt[9];
3868}
3869
3870static u64 access_sdma_packet_desc_overflow_err_cnt(
3871 const struct cntr_entry *entry,
3872 void *context, int vl, int mode, u64 data)
3873{
3874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3875
3876 return dd->sw_send_dma_eng_err_status_cnt[8];
3877}
3878
3879static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3880 void *context, int vl,
3881 int mode, u64 data)
3882{
3883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3884
3885 return dd->sw_send_dma_eng_err_status_cnt[7];
3886}
3887
3888static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3889 void *context, int vl, int mode, u64 data)
3890{
3891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3892
3893 return dd->sw_send_dma_eng_err_status_cnt[6];
3894}
3895
3896static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3897 void *context, int vl, int mode,
3898 u64 data)
3899{
3900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3901
3902 return dd->sw_send_dma_eng_err_status_cnt[5];
3903}
3904
3905static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3906 void *context, int vl, int mode,
3907 u64 data)
3908{
3909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3910
3911 return dd->sw_send_dma_eng_err_status_cnt[4];
3912}
3913
3914static u64 access_sdma_tail_out_of_bounds_err_cnt(
3915 const struct cntr_entry *entry,
3916 void *context, int vl, int mode, u64 data)
3917{
3918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3919
3920 return dd->sw_send_dma_eng_err_status_cnt[3];
3921}
3922
3923static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3924 void *context, int vl, int mode,
3925 u64 data)
3926{
3927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3928
3929 return dd->sw_send_dma_eng_err_status_cnt[2];
3930}
3931
3932static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3933 void *context, int vl, int mode,
3934 u64 data)
3935{
3936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3937
3938 return dd->sw_send_dma_eng_err_status_cnt[1];
3939}
3940
3941static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3942 void *context, int vl, int mode,
3943 u64 data)
3944{
3945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3946
3947 return dd->sw_send_dma_eng_err_status_cnt[0];
3948}
3949
3950#define def_access_sw_cpu(cntr) \
3951static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3952 void *context, int vl, int mode, u64 data) \
3953{ \
3954 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3955 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3956 ppd->ibport_data.rvp.cntr, vl, \
3957 mode, data); \
3958}
3959
3960def_access_sw_cpu(rc_acks);
3961def_access_sw_cpu(rc_qacks);
3962def_access_sw_cpu(rc_delayed_comp);
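
/*
 * For reference, def_access_sw_cpu(rc_acks) above expands to a
 * per-port accessor over the per-CPU rc_acks counter and its
 * z_rc_acks baseline:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd,
 *				      &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */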
3963
3964#define def_access_ibp_counter(cntr) \
3965static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3966 void *context, int vl, int mode, u64 data) \
3967{ \
3968 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3969 \
3970 if (vl != CNTR_INVALID_VL) \
3971 return 0; \
3972 \
3973 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
3974 mode, data); \
3975}
3976
3977def_access_ibp_counter(loop_pkts);
3978def_access_ibp_counter(rc_resends);
3979def_access_ibp_counter(rnr_naks);
3980def_access_ibp_counter(other_naks);
3981def_access_ibp_counter(rc_timeouts);
3982def_access_ibp_counter(pkt_drops);
3983def_access_ibp_counter(dmawait);
3984def_access_ibp_counter(rc_seqnak);
3985def_access_ibp_counter(rc_dupreq);
3986def_access_ibp_counter(rdma_seq);
3987def_access_ibp_counter(unaligned);
3988def_access_ibp_counter(seq_naks);
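
/*
 * Note on the generated ibp accessors above: they are port-wide, so a
 * per-VL query (vl != CNTR_INVALID_VL) deliberately reads as 0.  A
 * direct call looks like the sketch below; hfi1_example_ibp_rnr_naks()
 * is illustrative, and CNTR_MODE_R (the read selector used by the
 * counter code in this file) is assumed here.
 */
static inline u64 hfi1_example_ibp_rnr_naks(struct hfi1_pportdata *ppd)
{
	/* entry and data are unused; vl must be CNTR_INVALID_VL */
	return access_ibp_rnr_naks(NULL, ppd, CNTR_INVALID_VL,
				   CNTR_MODE_R, 0);
}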
3989
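/*
 * Example (sketch): each dev_cntrs[] entry below binds a counter name
 * and flags to one of the accessors above, so reading any device
 * counter becomes a single indirect call.  The rw_cnt member name and
 * CNTR_MODE_R read selector are assumed from usage elsewhere in this
 * file, not guaranteed by this excerpt.
 */
static inline u64 hfi1_example_read_dev_cntr(const struct cntr_entry *entry,
					     struct hfi1_devdata *dd)
{
	/* dd-level accessors take the devdata as their context pointer */
	return entry->rw_cnt(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
}
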
3990static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3991[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3992[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3993 CNTR_NORMAL),
3994[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3995 CNTR_NORMAL),
3996[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3997 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3998 CNTR_NORMAL),
3999[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4000 CNTR_NORMAL),
4001[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4002 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4003[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4004 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4005[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4006 CNTR_NORMAL),
4007[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4008 CNTR_NORMAL),
4009[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4010 CNTR_NORMAL),
4011[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4012 CNTR_NORMAL),
4013[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4014 CNTR_NORMAL),
4015[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4016 CNTR_NORMAL),
4017[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4018 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4019[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4020 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4021[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4022 CNTR_SYNTH),
4023[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4024[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4025 CNTR_SYNTH),
4026[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4027 CNTR_SYNTH),
4028[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4029 CNTR_SYNTH),
4030[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4031 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4032[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4033 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4034 CNTR_SYNTH),
4035[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4036 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4037[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4038 CNTR_SYNTH),
4039[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4040 CNTR_SYNTH),
4041[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4042 CNTR_SYNTH),
4043[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4044 CNTR_SYNTH),
4045[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4046 CNTR_SYNTH),
4047[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4048 CNTR_SYNTH),
4049[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4050 CNTR_SYNTH),
4051[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4052 CNTR_SYNTH | CNTR_VL),
4053[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4054 CNTR_SYNTH | CNTR_VL),
4055[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4056[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4057 CNTR_SYNTH | CNTR_VL),
4058[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4059[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4060 CNTR_SYNTH | CNTR_VL),
4061[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4062 CNTR_SYNTH),
4063[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4064 CNTR_SYNTH | CNTR_VL),
4065[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4066 CNTR_SYNTH),
4067[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4068 CNTR_SYNTH | CNTR_VL),
4069[C_DC_TOTAL_CRC] =
4070 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4071 CNTR_SYNTH),
4072[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4073 CNTR_SYNTH),
4074[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4075 CNTR_SYNTH),
4076[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4077 CNTR_SYNTH),
4078[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4079 CNTR_SYNTH),
4080[C_DC_CRC_MULT_LN] =
4081 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4082 CNTR_SYNTH),
4083[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4084 CNTR_SYNTH),
4085[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4086 CNTR_SYNTH),
4087[C_DC_SEQ_CRC_CNT] =
4088 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4089 CNTR_SYNTH),
4090[C_DC_ESC0_ONLY_CNT] =
4091 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4092 CNTR_SYNTH),
4093[C_DC_ESC0_PLUS1_CNT] =
4094 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4095 CNTR_SYNTH),
4096[C_DC_ESC0_PLUS2_CNT] =
4097 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4098 CNTR_SYNTH),
4099[C_DC_REINIT_FROM_PEER_CNT] =
4100 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4101 CNTR_SYNTH),
4102[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4103 CNTR_SYNTH),
4104[C_DC_MISC_FLG_CNT] =
4105 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4106 CNTR_SYNTH),
4107[C_DC_PRF_GOOD_LTP_CNT] =
4108 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4109[C_DC_PRF_ACCEPTED_LTP_CNT] =
4110 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4111 CNTR_SYNTH),
4112[C_DC_PRF_RX_FLIT_CNT] =
4113 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4114[C_DC_PRF_TX_FLIT_CNT] =
4115 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4116[C_DC_PRF_CLK_CNTR] =
4117 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4118[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4119 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4120[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4121 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4122 CNTR_SYNTH),
4123[C_DC_PG_STS_TX_SBE_CNT] =
4124 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4125[C_DC_PG_STS_TX_MBE_CNT] =
4126 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4127 CNTR_SYNTH),
4128[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4129 access_sw_cpu_intr),
4130[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4131 access_sw_cpu_rcv_limit),
4132[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4133 access_sw_vtx_wait),
4134[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4135 access_sw_pio_wait),
4136[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4137 access_sw_pio_drain),
4138[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4139 access_sw_kmem_wait),
4140[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4141 access_sw_send_schedule),
4142[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4143 SEND_DMA_DESC_FETCHED_CNT, 0,
4144 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4145 dev_access_u32_csr),
4146[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4147 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4148 access_sde_int_cnt),
4149[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4150 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4151 access_sde_err_cnt),
4152[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4153 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4154 access_sde_idle_int_cnt),
4155[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4156 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4157 access_sde_progress_int_cnt),
4158/* MISC_ERR_STATUS */
4159[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4160 CNTR_NORMAL,
4161 access_misc_pll_lock_fail_err_cnt),
4162[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4163 CNTR_NORMAL,
4164 access_misc_mbist_fail_err_cnt),
4165[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4166 CNTR_NORMAL,
4167 access_misc_invalid_eep_cmd_err_cnt),
4168[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4169 CNTR_NORMAL,
4170 access_misc_efuse_done_parity_err_cnt),
4171[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4172 CNTR_NORMAL,
4173 access_misc_efuse_write_err_cnt),
4174[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4175 0, CNTR_NORMAL,
4176 access_misc_efuse_read_bad_addr_err_cnt),
4177[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4178 CNTR_NORMAL,
4179 access_misc_efuse_csr_parity_err_cnt),
4180[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4181 CNTR_NORMAL,
4182 access_misc_fw_auth_failed_err_cnt),
4183[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4184 CNTR_NORMAL,
4185 access_misc_key_mismatch_err_cnt),
4186[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4187 CNTR_NORMAL,
4188 access_misc_sbus_write_failed_err_cnt),
4189[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4190 CNTR_NORMAL,
4191 access_misc_csr_write_bad_addr_err_cnt),
4192[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4193 CNTR_NORMAL,
4194 access_misc_csr_read_bad_addr_err_cnt),
4195[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4196 CNTR_NORMAL,
4197 access_misc_csr_parity_err_cnt),
4198/* CceErrStatus */
4199[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4200 CNTR_NORMAL,
4201 access_sw_cce_err_status_aggregated_cnt),
4202[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4203 CNTR_NORMAL,
4204 access_cce_msix_csr_parity_err_cnt),
4205[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4206 CNTR_NORMAL,
4207 access_cce_int_map_unc_err_cnt),
4208[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4209 CNTR_NORMAL,
4210 access_cce_int_map_cor_err_cnt),
4211[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4212 CNTR_NORMAL,
4213 access_cce_msix_table_unc_err_cnt),
4214[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4215 CNTR_NORMAL,
4216 access_cce_msix_table_cor_err_cnt),
4217[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4218 0, CNTR_NORMAL,
4219 access_cce_rxdma_conv_fifo_parity_err_cnt),
4220[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4221 0, CNTR_NORMAL,
4222 access_cce_rcpl_async_fifo_parity_err_cnt),
4223[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4224 CNTR_NORMAL,
4225 access_cce_seg_write_bad_addr_err_cnt),
4226[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4227 CNTR_NORMAL,
4228 access_cce_seg_read_bad_addr_err_cnt),
4229[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4230 CNTR_NORMAL,
4231 access_la_triggered_cnt),
4232[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4233 CNTR_NORMAL,
4234 access_cce_trgt_cpl_timeout_err_cnt),
4235[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4236 CNTR_NORMAL,
4237 access_pcic_receive_parity_err_cnt),
4238[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4239 CNTR_NORMAL,
4240 access_pcic_transmit_back_parity_err_cnt),
4241[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4242 0, CNTR_NORMAL,
4243 access_pcic_transmit_front_parity_err_cnt),
4244[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4245 CNTR_NORMAL,
4246 access_pcic_cpl_dat_q_unc_err_cnt),
4247[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4248 CNTR_NORMAL,
4249 access_pcic_cpl_hd_q_unc_err_cnt),
4250[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4251 CNTR_NORMAL,
4252 access_pcic_post_dat_q_unc_err_cnt),
4253[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4254 CNTR_NORMAL,
4255 access_pcic_post_hd_q_unc_err_cnt),
4256[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4257 CNTR_NORMAL,
4258 access_pcic_retry_sot_mem_unc_err_cnt),
4259[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4260 CNTR_NORMAL,
4261 access_pcic_retry_mem_unc_err),
4262[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4263 CNTR_NORMAL,
4264 access_pcic_n_post_dat_q_parity_err_cnt),
4265[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4266 CNTR_NORMAL,
4267 access_pcic_n_post_h_q_parity_err_cnt),
4268[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4269 CNTR_NORMAL,
4270 access_pcic_cpl_dat_q_cor_err_cnt),
4271[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4272 CNTR_NORMAL,
4273 access_pcic_cpl_hd_q_cor_err_cnt),
4274[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4275 CNTR_NORMAL,
4276 access_pcic_post_dat_q_cor_err_cnt),
4277[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4278 CNTR_NORMAL,
4279 access_pcic_post_hd_q_cor_err_cnt),
4280[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4281 CNTR_NORMAL,
4282 access_pcic_retry_sot_mem_cor_err_cnt),
4283[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4284 CNTR_NORMAL,
4285 access_pcic_retry_mem_cor_err_cnt),
4286[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4287 "CceCli1AsyncFifoDbgParityError", 0, 0,
4288 CNTR_NORMAL,
4289 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4290[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4291 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4292 CNTR_NORMAL,
4293 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4294 ),
4295[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4296 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4297 CNTR_NORMAL,
4298 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4299[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4300 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4301 CNTR_NORMAL,
4302 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4303[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4304 0, CNTR_NORMAL,
4305 access_cce_cli2_async_fifo_parity_err_cnt),
4306[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4307 CNTR_NORMAL,
4308 access_cce_csr_cfg_bus_parity_err_cnt),
4309[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4310 0, CNTR_NORMAL,
4311 access_cce_cli0_async_fifo_parity_err_cnt),
4312[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4313 CNTR_NORMAL,
4314 access_cce_rspd_data_parity_err_cnt),
4315[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4316 CNTR_NORMAL,
4317 access_cce_trgt_access_err_cnt),
4318[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4319 0, CNTR_NORMAL,
4320 access_cce_trgt_async_fifo_parity_err_cnt),
4321[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4322 CNTR_NORMAL,
4323 access_cce_csr_write_bad_addr_err_cnt),
4324[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4325 CNTR_NORMAL,
4326 access_cce_csr_read_bad_addr_err_cnt),
4327[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4328 CNTR_NORMAL,
4329 access_ccs_csr_parity_err_cnt),
4330
4331/* RcvErrStatus */
4332[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4333 CNTR_NORMAL,
4334 access_rx_csr_parity_err_cnt),
4335[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4336 CNTR_NORMAL,
4337 access_rx_csr_write_bad_addr_err_cnt),
4338[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4339 CNTR_NORMAL,
4340 access_rx_csr_read_bad_addr_err_cnt),
4341[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4342 CNTR_NORMAL,
4343 access_rx_dma_csr_unc_err_cnt),
4344[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4345 CNTR_NORMAL,
4346 access_rx_dma_dq_fsm_encoding_err_cnt),
4347[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4348 CNTR_NORMAL,
4349 access_rx_dma_eq_fsm_encoding_err_cnt),
4350[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4351 CNTR_NORMAL,
4352 access_rx_dma_csr_parity_err_cnt),
4353[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4354 CNTR_NORMAL,
4355 access_rx_rbuf_data_cor_err_cnt),
4356[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4357 CNTR_NORMAL,
4358 access_rx_rbuf_data_unc_err_cnt),
4359[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4360 CNTR_NORMAL,
4361 access_rx_dma_data_fifo_rd_cor_err_cnt),
4362[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4363 CNTR_NORMAL,
4364 access_rx_dma_data_fifo_rd_unc_err_cnt),
4365[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4366 CNTR_NORMAL,
4367 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4368[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4369 CNTR_NORMAL,
4370 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4371[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4372 CNTR_NORMAL,
4373 access_rx_rbuf_desc_part2_cor_err_cnt),
4374[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4375 CNTR_NORMAL,
4376 access_rx_rbuf_desc_part2_unc_err_cnt),
4377[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4378 CNTR_NORMAL,
4379 access_rx_rbuf_desc_part1_cor_err_cnt),
4380[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4381 CNTR_NORMAL,
4382 access_rx_rbuf_desc_part1_unc_err_cnt),
4383[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4384 CNTR_NORMAL,
4385 access_rx_hq_intr_fsm_err_cnt),
4386[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4387 CNTR_NORMAL,
4388 access_rx_hq_intr_csr_parity_err_cnt),
4389[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4390 CNTR_NORMAL,
4391 access_rx_lookup_csr_parity_err_cnt),
4392[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4393 CNTR_NORMAL,
4394 access_rx_lookup_rcv_array_cor_err_cnt),
4395[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4396 CNTR_NORMAL,
4397 access_rx_lookup_rcv_array_unc_err_cnt),
4398[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4399 0, CNTR_NORMAL,
4400 access_rx_lookup_des_part2_parity_err_cnt),
4401[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4402 0, CNTR_NORMAL,
4403 access_rx_lookup_des_part1_unc_cor_err_cnt),
4404[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4405 CNTR_NORMAL,
4406 access_rx_lookup_des_part1_unc_err_cnt),
4407[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4408 CNTR_NORMAL,
4409 access_rx_rbuf_next_free_buf_cor_err_cnt),
4410[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4411 CNTR_NORMAL,
4412 access_rx_rbuf_next_free_buf_unc_err_cnt),
4413[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4414 "RxRbufFlInitWrAddrParityErr", 0, 0,
4415 CNTR_NORMAL,
4416 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4417[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4418 0, CNTR_NORMAL,
4419 access_rx_rbuf_fl_initdone_parity_err_cnt),
4420[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4421 0, CNTR_NORMAL,
4422 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4423[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4424 CNTR_NORMAL,
4425 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4426[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4427 CNTR_NORMAL,
4428 access_rx_rbuf_empty_err_cnt),
4429[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4430 CNTR_NORMAL,
4431 access_rx_rbuf_full_err_cnt),
4432[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4433 CNTR_NORMAL,
4434 access_rbuf_bad_lookup_err_cnt),
4435[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4436 CNTR_NORMAL,
4437 access_rbuf_ctx_id_parity_err_cnt),
4438[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4439 CNTR_NORMAL,
4440 access_rbuf_csr_qeopdw_parity_err_cnt),
4441[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4442 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4443 CNTR_NORMAL,
4444 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4445[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4446 "RxRbufCsrQTlPtrParityErr", 0, 0,
4447 CNTR_NORMAL,
4448 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4449[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4450 0, CNTR_NORMAL,
4451 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4452[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4453 0, CNTR_NORMAL,
4454 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4455[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4456 0, 0, CNTR_NORMAL,
4457 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4458[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4459 0, CNTR_NORMAL,
4460 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4461[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4462 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4463 CNTR_NORMAL,
4464 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4465[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4466 0, CNTR_NORMAL,
4467 access_rx_rbuf_block_list_read_cor_err_cnt),
4468[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4469 0, CNTR_NORMAL,
4470 access_rx_rbuf_block_list_read_unc_err_cnt),
4471[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4472 CNTR_NORMAL,
4473 access_rx_rbuf_lookup_des_cor_err_cnt),
4474[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4475 CNTR_NORMAL,
4476 access_rx_rbuf_lookup_des_unc_err_cnt),
4477[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4478 "RxRbufLookupDesRegUncCorErr", 0, 0,
4479 CNTR_NORMAL,
4480 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4481[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4482 CNTR_NORMAL,
4483 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4484[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4485 CNTR_NORMAL,
4486 access_rx_rbuf_free_list_cor_err_cnt),
4487[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4488 CNTR_NORMAL,
4489 access_rx_rbuf_free_list_unc_err_cnt),
4490[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4491 CNTR_NORMAL,
4492 access_rx_rcv_fsm_encoding_err_cnt),
4493[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4494 CNTR_NORMAL,
4495 access_rx_dma_flag_cor_err_cnt),
4496[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4497 CNTR_NORMAL,
4498 access_rx_dma_flag_unc_err_cnt),
4499[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4500 CNTR_NORMAL,
4501 access_rx_dc_sop_eop_parity_err_cnt),
4502[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4503 CNTR_NORMAL,
4504 access_rx_rcv_csr_parity_err_cnt),
4505[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4506 CNTR_NORMAL,
4507 access_rx_rcv_qp_map_table_cor_err_cnt),
4508[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4509 CNTR_NORMAL,
4510 access_rx_rcv_qp_map_table_unc_err_cnt),
4511[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4512 CNTR_NORMAL,
4513 access_rx_rcv_data_cor_err_cnt),
4514[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4515 CNTR_NORMAL,
4516 access_rx_rcv_data_unc_err_cnt),
4517[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4518 CNTR_NORMAL,
4519 access_rx_rcv_hdr_cor_err_cnt),
4520[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4521 CNTR_NORMAL,
4522 access_rx_rcv_hdr_unc_err_cnt),
4523[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4524 CNTR_NORMAL,
4525 access_rx_dc_intf_parity_err_cnt),
4526[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4527 CNTR_NORMAL,
4528 access_rx_dma_csr_cor_err_cnt),
4529/* SendPioErrStatus */
4530[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4531 CNTR_NORMAL,
4532 access_pio_pec_sop_head_parity_err_cnt),
4533[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4534 CNTR_NORMAL,
4535 access_pio_pcc_sop_head_parity_err_cnt),
4536[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4537 0, 0, CNTR_NORMAL,
4538 access_pio_last_returned_cnt_parity_err_cnt),
4539[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4540 0, CNTR_NORMAL,
4541 access_pio_current_free_cnt_parity_err_cnt),
4542[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4543 CNTR_NORMAL,
4544 access_pio_reserved_31_err_cnt),
4545[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4546 CNTR_NORMAL,
4547 access_pio_reserved_30_err_cnt),
4548[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4549 CNTR_NORMAL,
4550 access_pio_ppmc_sop_len_err_cnt),
4551[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4552 CNTR_NORMAL,
4553 access_pio_ppmc_bqc_mem_parity_err_cnt),
4554[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4555 CNTR_NORMAL,
4556 access_pio_vl_fifo_parity_err_cnt),
4557[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4558 CNTR_NORMAL,
4559 access_pio_vlf_sop_parity_err_cnt),
4560[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4561 CNTR_NORMAL,
4562 access_pio_vlf_v1_len_parity_err_cnt),
4563[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4564 CNTR_NORMAL,
4565 access_pio_block_qw_count_parity_err_cnt),
4566[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4567 CNTR_NORMAL,
4568 access_pio_write_qw_valid_parity_err_cnt),
4569[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4570 CNTR_NORMAL,
4571 access_pio_state_machine_err_cnt),
4572[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4573 CNTR_NORMAL,
4574 access_pio_write_data_parity_err_cnt),
4575[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4576 CNTR_NORMAL,
4577 access_pio_host_addr_mem_cor_err_cnt),
4578[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4579 CNTR_NORMAL,
4580 access_pio_host_addr_mem_unc_err_cnt),
4581[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4582 CNTR_NORMAL,
4583 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4584[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4585 CNTR_NORMAL,
4586 access_pio_init_sm_in_err_cnt),
4587[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4588 CNTR_NORMAL,
4589 access_pio_ppmc_pbl_fifo_err_cnt),
4590[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4591 0, CNTR_NORMAL,
4592 access_pio_credit_ret_fifo_parity_err_cnt),
4593[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4594 CNTR_NORMAL,
4595 access_pio_v1_len_mem_bank1_cor_err_cnt),
4596[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4597 CNTR_NORMAL,
4598 access_pio_v1_len_mem_bank0_cor_err_cnt),
4599[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4600 CNTR_NORMAL,
4601 access_pio_v1_len_mem_bank1_unc_err_cnt),
4602[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4603 CNTR_NORMAL,
4604 access_pio_v1_len_mem_bank0_unc_err_cnt),
4605[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4606 CNTR_NORMAL,
4607 access_pio_sm_pkt_reset_parity_err_cnt),
4608[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4609 CNTR_NORMAL,
4610 access_pio_pkt_evict_fifo_parity_err_cnt),
4611[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4612 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4613 CNTR_NORMAL,
4614 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4615[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4616 CNTR_NORMAL,
4617 access_pio_sbrdctl_crrel_parity_err_cnt),
4618[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4619 CNTR_NORMAL,
4620 access_pio_pec_fifo_parity_err_cnt),
4621[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4622 CNTR_NORMAL,
4623 access_pio_pcc_fifo_parity_err_cnt),
4624[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4625 CNTR_NORMAL,
4626 access_pio_sb_mem_fifo1_err_cnt),
4627[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4628 CNTR_NORMAL,
4629 access_pio_sb_mem_fifo0_err_cnt),
4630[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4631 CNTR_NORMAL,
4632 access_pio_csr_parity_err_cnt),
4633[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4634 CNTR_NORMAL,
4635 access_pio_write_addr_parity_err_cnt),
4636[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4637 CNTR_NORMAL,
4638 access_pio_write_bad_ctxt_err_cnt),
4639/* SendDmaErrStatus */
4640[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4641 0, CNTR_NORMAL,
4642 access_sdma_pcie_req_tracking_cor_err_cnt),
4643[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4644 0, CNTR_NORMAL,
4645 access_sdma_pcie_req_tracking_unc_err_cnt),
4646[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4647 CNTR_NORMAL,
4648 access_sdma_csr_parity_err_cnt),
4649[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4650 CNTR_NORMAL,
4651 access_sdma_rpy_tag_err_cnt),
4652/* SendEgressErrStatus */
4653[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4654 CNTR_NORMAL,
4655 access_tx_read_pio_memory_csr_unc_err_cnt),
4656[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4657 0, CNTR_NORMAL,
4658 access_tx_read_sdma_memory_csr_err_cnt),
4659[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4660 CNTR_NORMAL,
4661 access_tx_egress_fifo_cor_err_cnt),
4662[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4663 CNTR_NORMAL,
4664 access_tx_read_pio_memory_cor_err_cnt),
4665[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4666 CNTR_NORMAL,
4667 access_tx_read_sdma_memory_cor_err_cnt),
4668[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4669 CNTR_NORMAL,
4670 access_tx_sb_hdr_cor_err_cnt),
4671[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4672 CNTR_NORMAL,
4673 access_tx_credit_overrun_err_cnt),
4674[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4675 CNTR_NORMAL,
4676 access_tx_launch_fifo8_cor_err_cnt),
4677[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4678 CNTR_NORMAL,
4679 access_tx_launch_fifo7_cor_err_cnt),
4680[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4681 CNTR_NORMAL,
4682 access_tx_launch_fifo6_cor_err_cnt),
4683[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4684 CNTR_NORMAL,
4685 access_tx_launch_fifo5_cor_err_cnt),
4686[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4687 CNTR_NORMAL,
4688 access_tx_launch_fifo4_cor_err_cnt),
4689[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4690 CNTR_NORMAL,
4691 access_tx_launch_fifo3_cor_err_cnt),
4692[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4693 CNTR_NORMAL,
4694 access_tx_launch_fifo2_cor_err_cnt),
4695[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4696 CNTR_NORMAL,
4697 access_tx_launch_fifo1_cor_err_cnt),
4698[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4699 CNTR_NORMAL,
4700 access_tx_launch_fifo0_cor_err_cnt),
4701[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4702 CNTR_NORMAL,
4703 access_tx_credit_return_vl_err_cnt),
4704[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4705 CNTR_NORMAL,
4706 access_tx_hcrc_insertion_err_cnt),
4707[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4708 CNTR_NORMAL,
4709 access_tx_egress_fifo_unc_err_cnt),
4710[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4711 CNTR_NORMAL,
4712 access_tx_read_pio_memory_unc_err_cnt),
4713[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4714 CNTR_NORMAL,
4715 access_tx_read_sdma_memory_unc_err_cnt),
4716[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4717 CNTR_NORMAL,
4718 access_tx_sb_hdr_unc_err_cnt),
4719[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4720 CNTR_NORMAL,
4721 access_tx_credit_return_partiy_err_cnt),
4722[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4723 0, 0, CNTR_NORMAL,
4724 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4725[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4726 0, 0, CNTR_NORMAL,
4727 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4728[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4729 0, 0, CNTR_NORMAL,
4730 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4731[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4732 0, 0, CNTR_NORMAL,
4733 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4734[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4735 0, 0, CNTR_NORMAL,
4736 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4737[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4738 0, 0, CNTR_NORMAL,
4739 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4740[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4741 0, 0, CNTR_NORMAL,
4742 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4743[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4744 0, 0, CNTR_NORMAL,
4745 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4746[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4747 0, 0, CNTR_NORMAL,
4748 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4749[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4750 0, 0, CNTR_NORMAL,
4751 access_tx_sdma15_disallowed_packet_err_cnt),
4752[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4753 0, 0, CNTR_NORMAL,
4754 access_tx_sdma14_disallowed_packet_err_cnt),
4755[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4756 0, 0, CNTR_NORMAL,
4757 access_tx_sdma13_disallowed_packet_err_cnt),
4758[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4759 0, 0, CNTR_NORMAL,
4760 access_tx_sdma12_disallowed_packet_err_cnt),
4761[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4762 0, 0, CNTR_NORMAL,
4763 access_tx_sdma11_disallowed_packet_err_cnt),
4764[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4765 0, 0, CNTR_NORMAL,
4766 access_tx_sdma10_disallowed_packet_err_cnt),
4767[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4768 0, 0, CNTR_NORMAL,
4769 access_tx_sdma9_disallowed_packet_err_cnt),
4770[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4771 0, 0, CNTR_NORMAL,
4772 access_tx_sdma8_disallowed_packet_err_cnt),
4773[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4774 0, 0, CNTR_NORMAL,
4775 access_tx_sdma7_disallowed_packet_err_cnt),
4776[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4777 0, 0, CNTR_NORMAL,
4778 access_tx_sdma6_disallowed_packet_err_cnt),
4779[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4780 0, 0, CNTR_NORMAL,
4781 access_tx_sdma5_disallowed_packet_err_cnt),
4782[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4783 0, 0, CNTR_NORMAL,
4784 access_tx_sdma4_disallowed_packet_err_cnt),
4785[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4786 0, 0, CNTR_NORMAL,
4787 access_tx_sdma3_disallowed_packet_err_cnt),
4788[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4789 0, 0, CNTR_NORMAL,
4790 access_tx_sdma2_disallowed_packet_err_cnt),
4791[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4792 0, 0, CNTR_NORMAL,
4793 access_tx_sdma1_disallowed_packet_err_cnt),
4794[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4795 0, 0, CNTR_NORMAL,
4796 access_tx_sdma0_disallowed_packet_err_cnt),
4797[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4798 CNTR_NORMAL,
4799 access_tx_config_parity_err_cnt),
4800[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4801 CNTR_NORMAL,
4802 access_tx_sbrd_ctl_csr_parity_err_cnt),
4803[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4804 CNTR_NORMAL,
4805 access_tx_launch_csr_parity_err_cnt),
4806[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4807 CNTR_NORMAL,
4808 access_tx_illegal_vl_err_cnt),
4809[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4810 "TxSbrdCtlStateMachineParityErr", 0, 0,
4811 CNTR_NORMAL,
4812 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4813[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4814 CNTR_NORMAL,
4815 access_egress_reserved_10_err_cnt),
4816[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4817 CNTR_NORMAL,
4818 access_egress_reserved_9_err_cnt),
4819[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4820 0, 0, CNTR_NORMAL,
4821 access_tx_sdma_launch_intf_parity_err_cnt),
4822[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4823 CNTR_NORMAL,
4824 access_tx_pio_launch_intf_parity_err_cnt),
4825[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4826 CNTR_NORMAL,
4827 access_egress_reserved_6_err_cnt),
4828[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4829 CNTR_NORMAL,
4830 access_tx_incorrect_link_state_err_cnt),
4831[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4832 CNTR_NORMAL,
4833 access_tx_linkdown_err_cnt),
4834[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4835 "EgressFifoUnderrunOrParityErr", 0, 0,
4836 CNTR_NORMAL,
4837 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4838[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4839 CNTR_NORMAL,
4840 access_egress_reserved_2_err_cnt),
4841[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4842 CNTR_NORMAL,
4843 access_tx_pkt_integrity_mem_unc_err_cnt),
4844[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4845 CNTR_NORMAL,
4846 access_tx_pkt_integrity_mem_cor_err_cnt),
4847/* SendErrStatus */
4848[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4849 CNTR_NORMAL,
4850 access_send_csr_write_bad_addr_err_cnt),
4851[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4852 CNTR_NORMAL,
4853 access_send_csr_read_bad_addr_err_cnt),
4854[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4855 CNTR_NORMAL,
4856 access_send_csr_parity_cnt),
4857/* SendCtxtErrStatus */
4858[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4859 CNTR_NORMAL,
4860 access_pio_write_out_of_bounds_err_cnt),
4861[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4862 CNTR_NORMAL,
4863 access_pio_write_overflow_err_cnt),
4864[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4865 0, 0, CNTR_NORMAL,
4866 access_pio_write_crosses_boundary_err_cnt),
4867[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4868 CNTR_NORMAL,
4869 access_pio_disallowed_packet_err_cnt),
4870[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4871 CNTR_NORMAL,
4872 access_pio_inconsistent_sop_err_cnt),
4873/* SendDmaEngErrStatus */
4874[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4875 0, 0, CNTR_NORMAL,
4876 access_sdma_header_request_fifo_cor_err_cnt),
4877[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4878 CNTR_NORMAL,
4879 access_sdma_header_storage_cor_err_cnt),
4880[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4881 CNTR_NORMAL,
4882 access_sdma_packet_tracking_cor_err_cnt),
4883[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4884 CNTR_NORMAL,
4885 access_sdma_assembly_cor_err_cnt),
4886[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4887 CNTR_NORMAL,
4888 access_sdma_desc_table_cor_err_cnt),
4889[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4890 0, 0, CNTR_NORMAL,
4891 access_sdma_header_request_fifo_unc_err_cnt),
4892[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4893 CNTR_NORMAL,
4894 access_sdma_header_storage_unc_err_cnt),
4895[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4896 CNTR_NORMAL,
4897 access_sdma_packet_tracking_unc_err_cnt),
4898[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4899 CNTR_NORMAL,
4900 access_sdma_assembly_unc_err_cnt),
4901[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4902 CNTR_NORMAL,
4903 access_sdma_desc_table_unc_err_cnt),
4904[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4905 CNTR_NORMAL,
4906 access_sdma_timeout_err_cnt),
4907[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4908 CNTR_NORMAL,
4909 access_sdma_header_length_err_cnt),
4910[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4911 CNTR_NORMAL,
4912 access_sdma_header_address_err_cnt),
4913[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4914 CNTR_NORMAL,
4915 access_sdma_header_select_err_cnt),
4916[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4917 CNTR_NORMAL,
4918 access_sdma_reserved_9_err_cnt),
4919[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4920 CNTR_NORMAL,
4921 access_sdma_packet_desc_overflow_err_cnt),
4922[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4923 CNTR_NORMAL,
4924 access_sdma_length_mismatch_err_cnt),
4925[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4926 CNTR_NORMAL,
4927 access_sdma_halt_err_cnt),
4928[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4929 CNTR_NORMAL,
4930 access_sdma_mem_read_err_cnt),
4931[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4932 CNTR_NORMAL,
4933 access_sdma_first_desc_err_cnt),
4934[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4935 CNTR_NORMAL,
4936 access_sdma_tail_out_of_bounds_err_cnt),
4937[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4938 CNTR_NORMAL,
4939 access_sdma_too_long_err_cnt),
4940[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4941 CNTR_NORMAL,
4942 access_sdma_gen_mismatch_err_cnt),
4943[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4944 CNTR_NORMAL,
4945 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004946};
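
/*
 * Illustrative sketch, not part of the driver: the device counter table
 * above is filled with C99 designated initializers, so each C_* index
 * claims exactly one slot and unnamed slots are zero-filled.  Assuming
 * the cntr_entry layout implied by the CNTR_ELEM() initializers (name
 * string first), an entry could be inspected like this:
 *
 *	const struct cntr_entry *e = &dev_cntrs[C_SDMA_RPY_TAG_ERR];
 *
 *	pr_info("%s\n", e->name);	prints "SDmaRpyTagErr"
 */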
4947
4948static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4949[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4950 CNTR_NORMAL),
4951[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4952 CNTR_NORMAL),
4953[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4954 CNTR_NORMAL),
4955[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4956 CNTR_NORMAL),
4957[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4958 CNTR_NORMAL),
4959[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4960 CNTR_NORMAL),
4961[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4962 CNTR_NORMAL),
4963[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4964[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4965[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4966[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08004967 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004968[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08004969 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004970[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08004971 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004972[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4973[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4974[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08004975 access_sw_link_dn_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004976[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08004977 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05004978[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4979 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004980[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08004981 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004982[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08004983 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4984 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004985[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08004986 access_xmit_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004987[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08004988 access_rcv_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004989[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4990[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4991[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4992[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4993[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4994[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4995[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4996[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4997[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4998[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4999[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5000[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5001[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5002 access_sw_cpu_rc_acks),
5003[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005004 access_sw_cpu_rc_qacks),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005005[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005006 access_sw_cpu_rc_delayed_comp),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005007[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5008[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5009[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5010[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5011[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5012[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5013[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5014[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5015[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5016[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5017[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5018[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5019[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5020[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5021[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5022[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5023[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5024[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5025[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5026[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5027[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5028[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5029[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5030[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5031[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5032[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5033[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5034[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5035[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5036[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5037[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5038[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5039[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5040[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5041[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5042[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5043[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5044[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5045[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5046[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5047[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5048[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5049[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5050[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5051[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5052[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5053[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5054[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5055[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5056[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5057[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5058[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5059[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5060[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5061[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5062[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5063[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5064[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5065[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5066[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5067[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5068[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5069[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5070[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5071[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5072[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5073[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5074[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5075[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5076[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5077[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5078[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5079[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5080[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5081[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5082[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5083[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5084[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5085[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5086[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5087};
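
/*
 * The OVR_LBL()/OVR_ELM() pairs above generate one RcvHdrOvfl counter per
 * receive context, 0 through 159, instead of 160 hand-written CNTR_ELEM
 * lines.  A hypothetical compile-time check (not in the driver, and
 * assuming OVR_LBL(159) is the last port counter index) would catch the
 * table and the enum drifting apart:
 *
 *	BUILD_BUG_ON(OVR_LBL(159) != PORT_CNTR_LAST - 1);
 */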
5088
5089/* ======================================================================== */
5090
Mike Marciniszyn77241052015-07-30 15:17:43 -04005091/* return true if this is chip revision a */
5092int is_ax(struct hfi1_devdata *dd)
5093{
5094 u8 chip_rev_minor =
5095 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5096 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5097 return (chip_rev_minor & 0xf0) == 0;
5098}
5099
5100/* return true if this is chip revision b */
5101int is_bx(struct hfi1_devdata *dd)
5102{
5103 u8 chip_rev_minor =
5104 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5105 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005106 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005107}
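
/*
 * Worked example of the two step checks above: the step is the upper
 * nibble of ChipRevMinor, so a part reading chip_rev_minor == 0x02 is
 * A-step ((0x02 & 0xf0) == 0x00, is_ax() true) while 0x12 is B-step
 * ((0x12 & 0xf0) == 0x10, is_bx() true).
 */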
5108
5109/*
5110 * Append string s to buffer buf. Arguments curp and lenp are the current
5111 * position and remaining length, respectively.
5112 *
5113 * Return 0 on success, 1 if out of room.
5114 */
5115static int append_str(char *buf, char **curp, int *lenp, const char *s)
5116{
5117 char *p = *curp;
5118 int len = *lenp;
5119 int result = 0; /* success */
5120 char c;
5121
5122 /* add a comma, if this is not the first string in the buffer */
5123 if (p != buf) {
5124 if (len == 0) {
5125 result = 1; /* out of room */
5126 goto done;
5127 }
5128 *p++ = ',';
5129 len--;
5130 }
5131
5132 /* copy the string */
5133 while ((c = *s++) != 0) {
5134 if (len == 0) {
5135 result = 1; /* out of room */
5136 goto done;
5137 }
5138 *p++ = c;
5139 len--;
5140 }
5141
5142done:
5143 /* write return values */
5144 *curp = p;
5145 *lenp = len;
5146
5147 return result;
5148}
5149
5150/*
5151 * Using the given flag table, print a comma-separated string into
5152 * the buffer. End in '*' if the buffer is too short.
5153 */
5154static char *flag_string(char *buf, int buf_len, u64 flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005155 struct flag_table *table, int table_size)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005156{
5157 char extra[32];
5158 char *p = buf;
5159 int len = buf_len;
5160 int no_room = 0;
5161 int i;
5162
5163 /* make sure there are at least 2 bytes so we can form "*" */
5164 if (len < 2)
5165 return "";
5166
5167 len--; /* leave room for a nul */
5168 for (i = 0; i < table_size; i++) {
5169 if (flags & table[i].flag) {
5170 no_room = append_str(buf, &p, &len, table[i].str);
5171 if (no_room)
5172 break;
5173 flags &= ~table[i].flag;
5174 }
5175 }
5176
5177 /* any undocumented bits left? */
5178 if (!no_room && flags) {
5179 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5180 no_room = append_str(buf, &p, &len, extra);
5181 }
5182
5183 /* add '*' if we ran out of room */
5184 if (no_room) {
5185 /* may need to back up to add space for a '*' */
5186 if (len == 0)
5187 --p;
5188 *p++ = '*';
5189 }
5190
5191 /* add final nul - space already allocated above */
5192 *p = 0;
5193 return buf;
5194}
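
/*
 * Usage sketch for flag_string(), not part of the driver; the table
 * contents below are made up for illustration and use the .flag/.str
 * members referenced in the loop above:
 *
 *	static struct flag_table demo[] = {
 *		{ .flag = 1ull << 0, .str = "BitZero" },
 *		{ .flag = 1ull << 5, .str = "BitFive" },
 *	};
 *	char buf[64];
 *
 *	flag_string(buf, sizeof(buf), (1ull << 5) | (1ull << 9),
 *		    demo, ARRAY_SIZE(demo));
 *
 * buf now holds "BitFive,bits 0x200": bit 5 is matched by the table and
 * the unknown bit 9 is reported by the "undocumented bits" branch.
 */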
5195
5196/* first 8 CCE error interrupt source names */
5197static const char * const cce_misc_names[] = {
5198 "CceErrInt", /* 0 */
5199 "RxeErrInt", /* 1 */
5200 "MiscErrInt", /* 2 */
5201 "Reserved3", /* 3 */
5202 "PioErrInt", /* 4 */
5203 "SDmaErrInt", /* 5 */
5204 "EgressErrInt", /* 6 */
5205 "TxeErrInt" /* 7 */
5206};
5207
5208/*
5209 * Return the miscellaneous error interrupt name.
5210 */
5211static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5212{
5213 if (source < ARRAY_SIZE(cce_misc_names))
5214 strncpy(buf, cce_misc_names[source], bsize);
5215 else
Jubin John17fb4f22016-02-14 20:21:52 -08005216 snprintf(buf, bsize, "Reserved%u",
5217 source + IS_GENERAL_ERR_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005218
5219 return buf;
5220}
5221
5222/*
5223 * Return the SDMA engine error interrupt name.
5224 */
5225static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5226{
5227 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5228 return buf;
5229}
5230
5231/*
5232 * Return the send context error interrupt name.
5233 */
5234static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5235{
5236 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5237 return buf;
5238}
5239
5240static const char * const various_names[] = {
5241 "PbcInt",
5242 "GpioAssertInt",
5243 "Qsfp1Int",
5244 "Qsfp2Int",
5245 "TCritInt"
5246};
5247
5248/*
5249 * Return the various interrupt name.
5250 */
5251static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5252{
5253 if (source < ARRAY_SIZE(various_names))
5254 strncpy(buf, various_names[source], bsize);
5255 else
Jubin John8638b772016-02-14 20:19:24 -08005256 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005257 return buf;
5258}
5259
5260/*
5261 * Return the DC interrupt name.
5262 */
5263static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5264{
5265 static const char * const dc_int_names[] = {
5266 "common",
5267 "lcb",
5268 "8051",
5269 "lbm" /* local block merge */
5270 };
5271
5272 if (source < ARRAY_SIZE(dc_int_names))
5273 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5274 else
5275 snprintf(buf, bsize, "DCInt%u", source);
5276 return buf;
5277}
5278
5279static const char * const sdma_int_names[] = {
5280 "SDmaInt",
5281 "SdmaIdleInt",
5282 "SdmaProgressInt",
5283};
5284
5285/*
5286 * Return the SDMA engine interrupt name.
5287 */
5288static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5289{
5290 /* what interrupt */
5291 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5292 /* which engine */
5293 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5294
5295 if (likely(what < 3))
5296 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5297 else
5298 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5299 return buf;
5300}
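
/*
 * Worked example: with TXE_NUM_SDMA_ENGINES == 16, source 35 splits into
 * what = 35 / 16 = 2 and which = 35 % 16 = 3, producing the name
 * "SdmaProgressInt3".
 */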
5301
5302/*
5303 * Return the receive available interrupt name.
5304 */
5305static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5306{
5307 snprintf(buf, bsize, "RcvAvailInt%u", source);
5308 return buf;
5309}
5310
5311/*
5312 * Return the receive urgent interrupt name.
5313 */
5314static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5315{
5316 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5317 return buf;
5318}
5319
5320/*
5321 * Return the send credit interrupt name.
5322 */
5323static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5324{
5325 snprintf(buf, bsize, "SendCreditInt%u", source);
5326 return buf;
5327}
5328
5329/*
5330 * Return the reserved interrupt name.
5331 */
5332static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5333{
5334 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5335 return buf;
5336}
5337
5338static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5339{
5340 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005341 cce_err_status_flags,
5342 ARRAY_SIZE(cce_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005343}
5344
5345static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5346{
5347 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005348 rxe_err_status_flags,
5349 ARRAY_SIZE(rxe_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005350}
5351
5352static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5353{
5354 return flag_string(buf, buf_len, flags, misc_err_status_flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005355 ARRAY_SIZE(misc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005356}
5357
5358static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5359{
5360 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005361 pio_err_status_flags,
5362 ARRAY_SIZE(pio_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005363}
5364
5365static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5366{
5367 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005368 sdma_err_status_flags,
5369 ARRAY_SIZE(sdma_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005370}
5371
5372static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5373{
5374 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005375 egress_err_status_flags,
5376 ARRAY_SIZE(egress_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005377}
5378
5379static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5380{
5381 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005382 egress_err_info_flags,
5383 ARRAY_SIZE(egress_err_info_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005384}
5385
5386static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5387{
5388 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005389 send_err_status_flags,
5390 ARRAY_SIZE(send_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005391}
5392
5393static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5394{
5395 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005396 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005397
5398 /*
5399 * For most of these errors, there is nothing that can be done except
5400 * report or record it.
5401 */
5402 dd_dev_info(dd, "CCE Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005403 cce_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005404
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005405 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5406 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005407 /* this error requires a manual drop into SPC freeze mode */
5408 /* then a fix up */
5409 start_freeze_handling(dd->pport, FREEZE_SELF);
5410 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005411
5412 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5413 if (reg & (1ull << i)) {
5414 incr_cntr64(&dd->cce_err_status_cnt[i]);
5415 /* maintain a counter over all cce_err_status errors */
5416 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5417 }
5418 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005419}
5420
5421/*
5422 * Check counters for receive errors that do not have an interrupt
5423 * associated with them.
5424 */
5425#define RCVERR_CHECK_TIME 10
5426static void update_rcverr_timer(unsigned long opaque)
5427{
5428 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5429 struct hfi1_pportdata *ppd = dd->pport;
5430 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5431
5432 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
Jubin John17fb4f22016-02-14 20:21:52 -08005433 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005434 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
Jubin John17fb4f22016-02-14 20:21:52 -08005435 set_link_down_reason(
5436 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5437 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005438 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5439 }
Jubin John50e5dcb2016-02-14 20:19:41 -08005440 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005441
5442 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5443}
5444
5445static int init_rcverr(struct hfi1_devdata *dd)
5446{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305447 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005448 /* Assume the hardware counter has been reset */
5449 dd->rcv_ovfl_cnt = 0;
5450 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5451}
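
/*
 * Worked example: with RCVERR_CHECK_TIME == 10, the timer is armed for
 * jiffies + 10 * HZ, so the overflow counter is polled every ten
 * seconds independent of the kernel's HZ configuration.
 */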
5452
5453static void free_rcverr(struct hfi1_devdata *dd)
5454{
5455 if (dd->rcverr_timer.data)
5456 del_timer_sync(&dd->rcverr_timer);
5457 dd->rcverr_timer.data = 0;
5458}
5459
5460static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5461{
5462 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005463 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005464
5465 dd_dev_info(dd, "Receive Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005466 rxe_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005467
5468 if (reg & ALL_RXE_FREEZE_ERR) {
5469 int flags = 0;
5470
5471 /*
5472 * Freeze mode recovery is disabled for the errors
5473 * in RXE_FREEZE_ABORT_MASK
5474 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005475 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005476 flags = FREEZE_ABORT;
5477
5478 start_freeze_handling(dd->pport, flags);
5479 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005480
5481 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5482 if (reg & (1ull << i))
5483 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5484 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005485}
5486
5487static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5488{
5489 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005490 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005491
5492 dd_dev_info(dd, "Misc Error: %s",
Jubin John17fb4f22016-02-14 20:21:52 -08005493 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005494 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5495 if (reg & (1ull << i))
5496 incr_cntr64(&dd->misc_err_status_cnt[i]);
5497 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005498}
5499
5500static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5501{
5502 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005503 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005504
5505 dd_dev_info(dd, "PIO Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005506 pio_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005507
5508 if (reg & ALL_PIO_FREEZE_ERR)
5509 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005510
5511 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5512 if (reg & (1ull << i))
5513 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5514 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005515}
5516
5517static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5518{
5519 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005520 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005521
5522 dd_dev_info(dd, "SDMA Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005523 sdma_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005524
5525 if (reg & ALL_SDMA_FREEZE_ERR)
5526 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005527
5528 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5529 if (reg & (1ull << i))
5530 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5531 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005532}
5533
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005534static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5535{
5536 incr_cntr64(&ppd->port_xmit_discards);
5537}
5538
Mike Marciniszyn77241052015-07-30 15:17:43 -04005539static void count_port_inactive(struct hfi1_devdata *dd)
5540{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005541 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005542}
5543
5544/*
5545 * We have had a "disallowed packet" error during egress. Determine the
5546 * integrity check that failed, and update the relevant error counter, etc.
5547 *
5548 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5549 * bit of state per integrity check, and so we can miss the reason for an
5550 * egress error if more than one packet fails the same integrity check
5551 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5552 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005553static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5554 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005555{
5556 struct hfi1_pportdata *ppd = dd->pport;
5557 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5558 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5559 char buf[96];
5560
5561 /* clear down all observed info as quickly as possible after read */
5562 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5563
5564 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005565 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5566 info, egress_err_info_string(buf, sizeof(buf), info), src);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005567
5568 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005569 if (info & PORT_DISCARD_EGRESS_ERRS) {
5570 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005571
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005572 /*
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005573 * Count all applicable bits as individual errors and
5574 * attribute them to the packet that triggered this handler.
5575 * This may not be completely accurate due to limitations
5576 * on the available hardware error information. There is
5577 * a single information register and any number of error
5578 * packets may have occurred and contributed to it before
5579 * this routine is called. This means that:
5580 * a) If multiple packets with the same error occur before
5581 * this routine is called, earlier packets are missed.
5582 * There is only a single bit for each error type.
5583 * b) Errors may not be attributed to the correct VL.
5584 * The driver is attributing all bits in the info register
5585 * to the packet that triggered this call, but bits
5586 * could be an accumulation of different packets with
5587 * different VLs.
5588 * c) A single error packet may have multiple counts attached
5589 * to it. There is no way for the driver to know if
5590 * multiple bits set in the info register are due to a
5591 * single packet or multiple packets. The driver assumes
5592 * multiple packets.
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005593 */
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005594 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005595 for (i = 0; i < weight; i++) {
5596 __count_port_discards(ppd);
5597 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5598 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5599 else if (vl == 15)
5600 incr_cntr64(&ppd->port_xmit_discards_vl
5601 [C_VL_15]);
5602 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005603 }
5604}
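
/*
 * Worked example of the counting above: if info has three of the
 * PORT_DISCARD_EGRESS_ERRS bits set, hweight64() returns 3 and the loop
 * charges three discards, all attributed to the one VL passed in, per
 * limitation (b) in the comment above.
 */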
5605
5606/*
5607 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5608 * register. Does it represent a 'port inactive' error?
5609 */
5610static inline int port_inactive_err(u64 posn)
5611{
5612 return (posn >= SEES(TX_LINKDOWN) &&
5613 posn <= SEES(TX_INCORRECT_LINK_STATE));
5614}
5615
5616/*
5617 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5618 * register. Does it represent a 'disallowed packet' error?
5619 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005620static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005621{
5622 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5623 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5624}
5625
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005626/*
5627 * Input value is a bit position of one of the SDMA engine disallowed
5628 * packet errors. Return which engine. Use of this must be guarded by
5629 * disallowed_pkt_err().
5630 */
5631static inline int disallowed_pkt_engine(int posn)
5632{
5633 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5634}
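
/*
 * Worked example: if SEES(TX_SDMA0_DISALLOWED_PACKET) is bit position N,
 * then a status bit at N + 7 satisfies disallowed_pkt_err() and
 * disallowed_pkt_engine() maps it back to engine 7.
 */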
5635
5636/*
5637 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5638 * be done.
5639 */
5640static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5641{
5642 struct sdma_vl_map *m;
5643 int vl;
5644
5645 /* range check */
5646 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5647 return -1;
5648
5649 rcu_read_lock();
5650 m = rcu_dereference(dd->sdma_map);
5651 vl = m->engine_to_vl[engine];
5652 rcu_read_unlock();
5653
5654 return vl;
5655}
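
/*
 * The rcu_read_lock()/rcu_dereference() pair above is needed because
 * dd->sdma_map can be replaced at runtime; the lock keeps the map that
 * was dereferenced alive until the engine_to_vl[] read completes.
 */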
5656
5657/*
5658 * Translate the send context (software index) into a VL. Return -1 if the
5659 * translation cannot be done.
5660 */
5661static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5662{
5663 struct send_context_info *sci;
5664 struct send_context *sc;
5665 int i;
5666
5667 sci = &dd->send_contexts[sw_index];
5668
5669 /* there is no information for user (PSM) and ack contexts */
Jianxin Xiong44306f12016-04-12 11:30:28 -07005670 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005671 return -1;
5672
5673 sc = sci->sc;
5674 if (!sc)
5675 return -1;
5676 if (dd->vld[15].sc == sc)
5677 return 15;
5678 for (i = 0; i < num_vls; i++)
5679 if (dd->vld[i].sc == sc)
5680 return i;
5681
5682 return -1;
5683}
5684
Mike Marciniszyn77241052015-07-30 15:17:43 -04005685static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5686{
5687 u64 reg_copy = reg, handled = 0;
5688 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005689 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005690
5691 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5692 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005693 else if (is_ax(dd) &&
5694 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5695 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005696 start_freeze_handling(dd->pport, 0);
5697
5698 while (reg_copy) {
5699 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005700 /* fls64() returns a 1-based offset; we want it zero-based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005701 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005702 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005703
5704 if (port_inactive_err(shift)) {
5705 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005706 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005707 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005708 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5709
5710 handle_send_egress_err_info(dd, vl);
5711 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005712 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005713 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005714 }
5715
5716 reg &= ~handled;
5717
5718 if (reg)
5719 dd_dev_info(dd, "Egress Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005720 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005721
5722 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5723 if (reg & (1ull << i))
5724 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5725 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005726}
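
/*
 * Worked example of the decode loop above: for
 * reg_copy = (1ull << 40) | (1ull << 3), the first pass gets
 * fls64() == 41, handles bit position 40 and clears it; the second pass
 * gets fls64() == 4 and handles position 3, leaving reg_copy == 0.
 */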
5727
5728static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5729{
5730 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005731 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005732
5733 dd_dev_info(dd, "Send Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005734 send_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005735
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005736 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5737 if (reg & (1ull << i))
5738 incr_cntr64(&dd->send_err_status_cnt[i]);
5739 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005740}
5741
5742/*
5743 * The maximum number of times the error clear down will loop before
5744 * blocking a repeating error. This value is arbitrary.
5745 */
5746#define MAX_CLEAR_COUNT 20
5747
5748/*
5749 * Clear and handle an error register. All error interrupts are funneled
5750 * through here to have a central location to correctly handle single-
5751 * or multi-shot errors.
5752 *
5753 * For non per-context registers, call this routine with a context value
5754 * of 0 so the per-context offset is zero.
5755 *
5756 * If the handler loops too many times, assume that something is wrong
5757 * and can't be fixed, so mask the error bits.
5758 */
5759static void interrupt_clear_down(struct hfi1_devdata *dd,
5760 u32 context,
5761 const struct err_reg_info *eri)
5762{
5763 u64 reg;
5764 u32 count;
5765
5766 /* read in a loop until no more errors are seen */
5767 count = 0;
5768 while (1) {
5769 reg = read_kctxt_csr(dd, context, eri->status);
5770 if (reg == 0)
5771 break;
5772 write_kctxt_csr(dd, context, eri->clear, reg);
5773 if (likely(eri->handler))
5774 eri->handler(dd, context, reg);
5775 count++;
5776 if (count > MAX_CLEAR_COUNT) {
5777 u64 mask;
5778
5779 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005780 eri->desc, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005781 /*
5782 * Read-modify-write so any other masked bits
5783 * remain masked.
5784 */
5785 mask = read_kctxt_csr(dd, context, eri->mask);
5786 mask &= ~reg;
5787 write_kctxt_csr(dd, context, eri->mask, mask);
5788 break;
5789 }
5790 }
5791}
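
/*
 * Worked example of the masking step above: if the mask register reads
 * 0xff and reg == 0x05 keeps reasserting, the read-modify-write stores
 * 0xff & ~0x05 = 0xfa, masking only the repeat offenders while any
 * bits masked earlier stay masked.
 */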
5792
5793/*
5794 * CCE block "misc" interrupt. Source is < 16.
5795 */
5796static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5797{
5798 const struct err_reg_info *eri = &misc_errs[source];
5799
5800 if (eri->handler) {
5801 interrupt_clear_down(dd, 0, eri);
5802 } else {
5803 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005804 source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005805 }
5806}
5807
5808static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5809{
5810 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005811 sc_err_status_flags,
5812 ARRAY_SIZE(sc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005813}
5814
5815/*
5816 * Send context error interrupt. Source (hw_context) is < 160.
5817 *
5818 * All send context errors cause the send context to halt. The normal
5819 * clear-down mechanism cannot be used because we cannot clear the
5820 * error bits until several other long-running items are done first.
5821 * This is OK because with the context halted, nothing else is going
5822 * to happen on it anyway.
5823 */
5824static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5825 unsigned int hw_context)
5826{
5827 struct send_context_info *sci;
5828 struct send_context *sc;
5829 char flags[96];
5830 u64 status;
5831 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005832 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005833
5834 sw_index = dd->hw_to_sw[hw_context];
5835 if (sw_index >= dd->num_send_contexts) {
5836 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005837 "out of range sw index %u for send context %u\n",
5838 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005839 return;
5840 }
5841 sci = &dd->send_contexts[sw_index];
5842 sc = sci->sc;
5843 if (!sc) {
5844 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -08005845 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005846 return;
5847 }
5848
5849 /* tell the software that a halt has begun */
5850 sc_stop(sc, SCF_HALTED);
5851
5852 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5853
5854 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
Jubin John17fb4f22016-02-14 20:21:52 -08005855 send_context_err_status_string(flags, sizeof(flags),
5856 status));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005857
5858 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005859 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005860
5861 /*
5862 * Automatically restart halted kernel contexts out of interrupt
5863 * context. User contexts must ask the driver to restart the context.
5864 */
5865 if (sc->type != SC_USER)
5866 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005867
5868 /*
5869 * Update the counters for the corresponding status bits.
5870 * Note that these particular counters are aggregated over all
5871 * 160 contexts.
5872 */
5873 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5874 if (status & (1ull << i))
5875 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5876 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005877}
5878
5879static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5880 unsigned int source, u64 status)
5881{
5882 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005883 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005884
5885 sde = &dd->per_sdma[source];
5886#ifdef CONFIG_SDMA_VERBOSITY
5887 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5888 slashstrip(__FILE__), __LINE__, __func__);
5889 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5890 sde->this_idx, source, (unsigned long long)status);
5891#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005892 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005893 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005894
5895 /*
5896 * Update the counters for the corresponding status bits.
5897 * Note that these particular counters are aggregated over
5898 * all 16 DMA engines.
5899 */
5900 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5901 if (status & (1ull << i))
5902 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5903 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005904}
5905
5906/*
5907 * CCE block SDMA error interrupt. Source is < 16.
5908 */
5909static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5910{
5911#ifdef CONFIG_SDMA_VERBOSITY
5912 struct sdma_engine *sde = &dd->per_sdma[source];
5913
5914 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5915 slashstrip(__FILE__), __LINE__, __func__);
5916 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5917 source);
5918 sdma_dumpstate(sde);
5919#endif
5920 interrupt_clear_down(dd, source, &sdma_eng_err);
5921}
5922
5923/*
5924 * CCE block "various" interrupt. Source is < 8.
5925 */
5926static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5927{
5928 const struct err_reg_info *eri = &various_err[source];
5929
5930 /*
5931 * TCritInt cannot go through interrupt_clear_down()
5932 * because it is not a second tier interrupt. The handler
5933 * should be called directly.
5934 */
5935 if (source == TCRIT_INT_SOURCE)
5936 handle_temp_err(dd);
5937 else if (eri->handler)
5938 interrupt_clear_down(dd, 0, eri);
5939 else
5940 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005941 "%s: Unimplemented/reserved interrupt %d\n",
5942 __func__, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005943}
5944
5945static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5946{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005947 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005948 struct hfi1_pportdata *ppd = dd->pport;
5949 unsigned long flags;
5950 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5951
5952 if (reg & QSFP_HFI0_MODPRST_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005953 if (!qsfp_mod_present(ppd)) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08005954 dd_dev_info(dd, "%s: QSFP module removed\n",
5955 __func__);
5956
Mike Marciniszyn77241052015-07-30 15:17:43 -04005957 ppd->driver_link_ready = 0;
5958 /*
5959 * Cable removed, reset all our information about the
5960 * cache and cable capabilities
5961 */
5962
5963 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5964 /*
5965 * We don't set cache_refresh_required here as we expect
5966 * an interrupt when a cable is inserted
5967 */
5968 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005969 ppd->qsfp_info.reset_needed = 0;
5970 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005971 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08005972 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005973 /* Invert the ModPresent pin now to detect plug-in */
5974 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5975 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005976
5977 if ((ppd->offline_disabled_reason >
5978 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08005979 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08005980 (ppd->offline_disabled_reason ==
5981 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5982 ppd->offline_disabled_reason =
5983 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08005984 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005985
Mike Marciniszyn77241052015-07-30 15:17:43 -04005986 if (ppd->host_link_state == HLS_DN_POLL) {
5987 /*
5988 * The link is still in POLL. This means
5989 * that the normal link down processing
5990 * will not happen. We have to do it here
5991 * before turning the DC off.
5992 */
5993 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5994 }
5995 } else {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08005996 dd_dev_info(dd, "%s: QSFP module inserted\n",
5997 __func__);
5998
Mike Marciniszyn77241052015-07-30 15:17:43 -04005999 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6000 ppd->qsfp_info.cache_valid = 0;
6001 ppd->qsfp_info.cache_refresh_required = 1;
6002 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006003 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006004
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006005 /*
6006 * Stop inversion of ModPresent pin to detect
6007 * removal of the cable
6008 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006009 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006010 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6011 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6012
6013 ppd->offline_disabled_reason =
6014 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006015 }
6016 }
6017
6018 if (reg & QSFP_HFI0_INT_N) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006019 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006020 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006021 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6022 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006023 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6024 }
6025
6026 /* Schedule the QSFP work only if there is a cable attached. */
6027 if (qsfp_mod_present(ppd))
6028 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6029}
6030
6031static int request_host_lcb_access(struct hfi1_devdata *dd)
6032{
6033 int ret;
6034
6035 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006036 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6037 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006038 if (ret != HCMD_SUCCESS) {
6039 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006040 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006041 }
6042 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6043}
6044
6045static int request_8051_lcb_access(struct hfi1_devdata *dd)
6046{
6047 int ret;
6048
6049 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006050 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6051 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006052 if (ret != HCMD_SUCCESS) {
6053 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006054 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006055 }
6056 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6057}
6058
6059/*
6060 * Set the LCB selector - allow host access. The DCC selector always
6061 * points to the host.
6062 */
6063static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6064{
6065 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006066 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6067 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006068}
6069
6070/*
6071 * Clear the LCB selector - allow 8051 access. The DCC selector always
6072 * points to the host.
6073 */
6074static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6075{
6076 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006077 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006078}
6079
6080/*
6081 * Acquire LCB access from the 8051. If the host already has access,
6082 * just increment a counter. Otherwise, inform the 8051 that the
6083 * host is taking access.
6084 *
6085 * Returns:
6086 * 0 on success
6087 * -EBUSY if the 8051 has control and cannot be disturbed
6088 * -errno if unable to acquire access from the 8051
6089 */
6090int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6091{
6092 struct hfi1_pportdata *ppd = dd->pport;
6093 int ret = 0;
6094
6095 /*
6096 * Use the host link state lock so the operation of this routine
6097 * { link state check, selector change, count increment } can occur
6098 * as a unit against a link state change. Otherwise there is a
6099 * race between the state change and the count increment.
6100 */
6101 if (sleep_ok) {
6102 mutex_lock(&ppd->hls_lock);
6103 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006104 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006105 udelay(1);
6106 }
6107
6108 /* this access is valid only when the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07006109 if (ppd->host_link_state & HLS_DOWN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006110 dd_dev_info(dd, "%s: link state %s not up\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006111 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006112 ret = -EBUSY;
6113 goto done;
6114 }
6115
6116 if (dd->lcb_access_count == 0) {
6117 ret = request_host_lcb_access(dd);
6118 if (ret) {
6119 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006120 "%s: unable to acquire LCB access, err %d\n",
6121 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006122 goto done;
6123 }
6124 set_host_lcb_access(dd);
6125 }
6126 dd->lcb_access_count++;
6127done:
6128 mutex_unlock(&ppd->hls_lock);
6129 return ret;
6130}
6131
6132/*
6133 * Release LCB access by decrementing the use count. If the count is moving
6134 * from 1 to 0, inform the 8051 that it has control back.
6135 *
6136 * Returns:
6137 * 0 on success
6138 * -errno if unable to release access to the 8051
6139 */
6140int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6141{
6142 int ret = 0;
6143
6144 /*
6145 * Use the host link state lock because the acquire needed it.
6146 * Here, we only need to keep { selector change, count decrement }
6147 * as a unit.
6148 */
6149 if (sleep_ok) {
6150 mutex_lock(&dd->pport->hls_lock);
6151 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006152 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006153 udelay(1);
6154 }
6155
6156 if (dd->lcb_access_count == 0) {
6157 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006158 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006159 goto done;
6160 }
6161
6162 if (dd->lcb_access_count == 1) {
6163 set_8051_lcb_access(dd);
6164 ret = request_8051_lcb_access(dd);
6165 if (ret) {
6166 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006167 "%s: unable to release LCB access, err %d\n",
6168 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006169 /* restore host access if the grant didn't work */
6170 set_host_lcb_access(dd);
6171 goto done;
6172 }
6173 }
6174 dd->lcb_access_count--;
6175done:
6176 mutex_unlock(&dd->pport->hls_lock);
6177 return ret;
6178}
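
/*
 * Illustrative usage sketch - not part of the original file. A typical
 * acquire/release pairing around a direct LCB CSR read; the register
 * name is one already referenced elsewhere in this driver, and the
 * blocking (sleep_ok = 1) choice is an assumption.
 */
static u64 __maybe_unused example_read_lcb_csr(struct hfi1_devdata *dd)
{
	u64 val = 0;

	if (acquire_lcb_access(dd, 1) == 0) {
		val = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
		release_lcb_access(dd, 1);
	}
	return val;
}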
6179
6180/*
6181 * Initialize LCB access variables and state. Called during driver load,
6182 * after most of the initialization is finished.
6183 *
6184 * The DC default is LCB access on for the host. The driver defaults to
6185 * leaving access to the 8051. Assign access now - this constrains the call
6186 * to this routine to be after all LCB set-up is done. In particular, after
6187 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6188 */
6189static void init_lcb_access(struct hfi1_devdata *dd)
6190{
6191 dd->lcb_access_count = 0;
6192}
6193
6194/*
6195 * Write a response back to an 8051 request.
6196 */
6197static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6198{
6199 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
Jubin John17fb4f22016-02-14 20:21:52 -08006200 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6201 (u64)return_code <<
6202 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6203 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006204}
6205
6206/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006207 * Handle host requests from the 8051.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006208 */
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006209static void handle_8051_request(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006210{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006211 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006212 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006213 u16 data = 0;
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006214 u8 type;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006215
6216 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6217 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6218 return; /* no request */
6219
6220 /* zero out COMPLETED so the response is seen */
6221 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6222
6223 /* extract request details */
6224 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6225 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6226 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6227 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6228
6229 switch (type) {
6230 case HREQ_LOAD_CONFIG:
6231 case HREQ_SAVE_CONFIG:
6232 case HREQ_READ_CONFIG:
6233 case HREQ_SET_TX_EQ_ABS:
6234 case HREQ_SET_TX_EQ_REL:
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006235 case HREQ_ENABLE:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006236 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006237 type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006238 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6239 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006240 case HREQ_CONFIG_DONE:
6241 hreq_response(dd, HREQ_SUCCESS, 0);
6242 break;
6243
6244 case HREQ_INTERFACE_TEST:
6245 hreq_response(dd, HREQ_SUCCESS, data);
6246 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006247 default:
6248 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6249 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6250 break;
6251 }
6252}
6253
6254static void write_global_credit(struct hfi1_devdata *dd,
6255 u8 vau, u16 total, u16 shared)
6256{
6257 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
Jubin John17fb4f22016-02-14 20:21:52 -08006258 ((u64)total <<
6259 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6260 ((u64)shared <<
6261 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6262 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006263}
6264
6265/*
6266 * Set up initial VL15 credits of the remote. Assumes the rest of
6267 * the CM credit registers are zero from a previous global or credit reset.
6268 */
6269void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6270{
6271 /* leave shared count at zero for both global and VL15 */
6272 write_global_credit(dd, vau, vl15buf, 0);
6273
6274 /* We may need some credits for another VL when sending packets
6275 * with the snoop interface. Dividing it down the middle for VL15
6276 * and VL0 should suffice.
6277 */
6278 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6279 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6280 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6281 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6282 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6283 } else {
6284 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6285 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6286 }
6287}
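
/*
 * Worked example (illustrative, not in the original): with vau == 1 and
 * vl15buf == 0x40, snoop mode splits the buffer credits down the middle
 * (0x20 dedicated to VL15, 0x20 dedicated to VL0); otherwise all 0x40
 * are dedicated to VL15. The shared limit stays zero in both cases.
 */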
6288
6289/*
6290 * Zero all credit details from the previous connection and
6291 * reset the CM manager's internal counters.
6292 */
6293void reset_link_credits(struct hfi1_devdata *dd)
6294{
6295 int i;
6296
6297 /* remove all previous VL credit limits */
6298 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006299 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006300 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6301 write_global_credit(dd, 0, 0, 0);
6302 /* reset the CM block */
6303 pio_send_control(dd, PSC_CM_RESET);
6304}
6305
6306/* convert a vCU to a CU */
6307static u32 vcu_to_cu(u8 vcu)
6308{
6309 return 1 << vcu;
6310}
6311
6312/* convert a CU to a vCU */
6313static u8 cu_to_vcu(u32 cu)
6314{
6315 return ilog2(cu);
6316}
6317
6318/* convert a vAU to an AU */
6319static u32 vau_to_au(u8 vau)
6320{
6321 return 8 * (1 << vau);
6322}
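
/*
 * Worked examples (illustrative, not in the original): vCU/CU and
 * vAU/AU are power-of-two encodings, so the conversions above invert
 * each other for exact powers of two:
 *
 *	vcu_to_cu(0) == 1	cu_to_vcu(1) == 0
 *	vcu_to_cu(3) == 8	cu_to_vcu(8) == 3
 *	vau_to_au(0) == 8 bytes	vau_to_au(3) == 64 bytes
 */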
6323
6324static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6325{
6326 ppd->sm_trap_qp = 0x0;
6327 ppd->sa_qp = 0x1;
6328}
6329
6330/*
6331 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6332 */
6333static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6334{
6335 u64 reg;
6336
6337 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6338 write_csr(dd, DC_LCB_CFG_RUN, 0);
6339 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6340 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
Jubin John17fb4f22016-02-14 20:21:52 -08006341 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006342 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6343 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6344 reg = read_csr(dd, DCC_CFG_RESET);
Jubin John17fb4f22016-02-14 20:21:52 -08006345 write_csr(dd, DCC_CFG_RESET, reg |
6346 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6347 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006348 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006349 if (!abort) {
6350 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6351 write_csr(dd, DCC_CFG_RESET, reg);
6352 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6353 }
6354}
6355
6356/*
6357 * This routine should be called after the link has been transitioned to
6358 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6359 * reset).
6360 *
6361 * The expectation is that the caller of this routine would have taken
6362 * care of properly transitioning the link into the correct state.
6363 */
6364static void dc_shutdown(struct hfi1_devdata *dd)
6365{
6366 unsigned long flags;
6367
6368 spin_lock_irqsave(&dd->dc8051_lock, flags);
6369 if (dd->dc_shutdown) {
6370 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6371 return;
6372 }
6373 dd->dc_shutdown = 1;
6374 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6375 /* Shutdown the LCB */
6376 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006377 /*
6378	 * Going to OFFLINE would have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006379	 * SerDes into reset already. Just need to shut down the 8051
Jubin John4d114fd2016-02-14 20:21:43 -08006380	 * itself.
6381 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006382 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6383}
6384
Jubin John4d114fd2016-02-14 20:21:43 -08006385/*
6386 * Calling this after the DC has been brought out of reset should not
6387 * do any damage.
6388 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006389static void dc_start(struct hfi1_devdata *dd)
6390{
6391 unsigned long flags;
6392 int ret;
6393
6394 spin_lock_irqsave(&dd->dc8051_lock, flags);
6395 if (!dd->dc_shutdown)
6396 goto done;
6397 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6398 /* Take the 8051 out of reset */
6399 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6400 /* Wait until 8051 is ready */
6401 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6402 if (ret) {
6403 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006404 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006405 }
6406 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6407 write_csr(dd, DCC_CFG_RESET, 0x10);
6408 /* lcb_shutdown() with abort=1 does not restore these */
6409 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6410 spin_lock_irqsave(&dd->dc8051_lock, flags);
6411 dd->dc_shutdown = 0;
6412done:
6413 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6414}
6415
6416/*
6417 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6418 */
6419static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6420{
6421 u64 rx_radr, tx_radr;
6422 u32 version;
6423
6424 if (dd->icode != ICODE_FPGA_EMULATION)
6425 return;
6426
6427 /*
6428 * These LCB defaults on emulator _s are good, nothing to do here:
6429 * LCB_CFG_TX_FIFOS_RADR
6430 * LCB_CFG_RX_FIFOS_RADR
6431 * LCB_CFG_LN_DCLK
6432 * LCB_CFG_IGNORE_LOST_RCLK
6433 */
6434 if (is_emulator_s(dd))
6435 return;
6436 /* else this is _p */
6437
6438 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006439 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006440 version = 0x2d; /* all B0 use 0x2d or higher settings */
6441
6442 if (version <= 0x12) {
6443 /* release 0x12 and below */
6444
6445 /*
6446 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6447 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6448 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6449 */
6450 rx_radr =
6451 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6452 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6453 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6454 /*
6455 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6456 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6457 */
6458 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6459 } else if (version <= 0x18) {
6460 /* release 0x13 up to 0x18 */
6461 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6462 rx_radr =
6463 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6464 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6465 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6466 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6467 } else if (version == 0x19) {
6468 /* release 0x19 */
6469 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6470 rx_radr =
6471 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6472 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6473 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6474 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6475 } else if (version == 0x1a) {
6476 /* release 0x1a */
6477 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6478 rx_radr =
6479 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6480 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6481 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6482 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6483 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6484 } else {
6485 /* release 0x1b and higher */
6486 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6487 rx_radr =
6488 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6489 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6490 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6491 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6492 }
6493
6494 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6495 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6496 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
Jubin John17fb4f22016-02-14 20:21:52 -08006497 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006498 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6499}
6500
6501/*
6502 * Handle a SMA idle message
6503 *
6504 * This is a work-queue function outside of the interrupt.
6505 */
6506void handle_sma_message(struct work_struct *work)
6507{
6508 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6509 sma_message_work);
6510 struct hfi1_devdata *dd = ppd->dd;
6511 u64 msg;
6512 int ret;
6513
Jubin John4d114fd2016-02-14 20:21:43 -08006514 /*
6515 * msg is bytes 1-4 of the 40-bit idle message - the command code
6516 * is stripped off
6517 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006518 ret = read_idle_sma(dd, &msg);
6519 if (ret)
6520 return;
6521 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6522 /*
6523 * React to the SMA message. Byte[1] (0 for us) is the command.
6524 */
6525 switch (msg & 0xff) {
6526 case SMA_IDLE_ARM:
6527 /*
6528 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6529 * State Transitions
6530 *
6531 * Only expected in INIT or ARMED, discard otherwise.
6532 */
6533 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6534 ppd->neighbor_normal = 1;
6535 break;
6536 case SMA_IDLE_ACTIVE:
6537 /*
6538 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6539 * State Transitions
6540 *
6541 * Can activate the node. Discard otherwise.
6542 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006543 if (ppd->host_link_state == HLS_UP_ARMED &&
6544 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006545 ppd->neighbor_normal = 1;
6546 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6547 if (ret)
6548 dd_dev_err(
6549 dd,
6550 "%s: received Active SMA idle message, couldn't set link to Active\n",
6551 __func__);
6552 }
6553 break;
6554 default:
6555 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006556 "%s: received unexpected SMA idle message 0x%llx\n",
6557 __func__, msg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006558 break;
6559 }
6560}
6561
6562static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6563{
6564 u64 rcvctrl;
6565 unsigned long flags;
6566
6567 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6568 rcvctrl = read_csr(dd, RCV_CTRL);
6569 rcvctrl |= add;
6570 rcvctrl &= ~clear;
6571 write_csr(dd, RCV_CTRL, rcvctrl);
6572 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6573}
6574
6575static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6576{
6577 adjust_rcvctrl(dd, add, 0);
6578}
6579
6580static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6581{
6582 adjust_rcvctrl(dd, 0, clear);
6583}
6584
6585/*
6586 * Called from all interrupt handlers to start handling an SPC freeze.
6587 */
6588void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6589{
6590 struct hfi1_devdata *dd = ppd->dd;
6591 struct send_context *sc;
6592 int i;
6593
6594 if (flags & FREEZE_SELF)
6595 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6596
6597 /* enter frozen mode */
6598 dd->flags |= HFI1_FROZEN;
6599
6600 /* notify all SDMA engines that they are going into a freeze */
6601 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6602
6603 /* do halt pre-handling on all enabled send contexts */
6604 for (i = 0; i < dd->num_send_contexts; i++) {
6605 sc = dd->send_contexts[i].sc;
6606 if (sc && (sc->flags & SCF_ENABLED))
6607 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6608 }
6609
6610	/* Send contexts are frozen. Notify user space */
6611 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6612
6613 if (flags & FREEZE_ABORT) {
6614 dd_dev_err(dd,
6615 "Aborted freeze recovery. Please REBOOT system\n");
6616 return;
6617 }
6618 /* queue non-interrupt handler */
6619 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6620}
6621
6622/*
6623 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6624 * depending on the "freeze" parameter.
6625 *
6626 * No need to return an error if it times out; our only option
6627 * is to proceed anyway.
6628 */
6629static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6630{
6631 unsigned long timeout;
6632 u64 reg;
6633
6634 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6635 while (1) {
6636 reg = read_csr(dd, CCE_STATUS);
6637 if (freeze) {
6638 /* waiting until all indicators are set */
6639 if ((reg & ALL_FROZE) == ALL_FROZE)
6640 return; /* all done */
6641 } else {
6642 /* waiting until all indicators are clear */
6643 if ((reg & ALL_FROZE) == 0)
6644 return; /* all done */
6645 }
6646
6647 if (time_after(jiffies, timeout)) {
6648 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006649 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6650 freeze ? "" : "un", reg & ALL_FROZE,
6651 freeze ? ALL_FROZE : 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006652 return;
6653 }
6654 usleep_range(80, 120);
6655 }
6656}
6657
6658/*
6659 * Do all freeze handling for the RXE block.
6660 */
6661static void rxe_freeze(struct hfi1_devdata *dd)
6662{
6663 int i;
6664
6665 /* disable port */
6666 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6667
6668 /* disable all receive contexts */
6669 for (i = 0; i < dd->num_rcv_contexts; i++)
6670 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6671}
6672
6673/*
6674 * Unfreeze handling for the RXE block - kernel contexts only.
6675 * This will also enable the port. User contexts will do unfreeze
6676 * handling on a per-context basis as they call into the driver.
6678 */
6679static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6680{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006681 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006682 int i;
6683
6684 /* enable all kernel contexts */
Mitko Haralanov566c1572016-02-03 14:32:49 -08006685 for (i = 0; i < dd->n_krcv_queues; i++) {
6686 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6687 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6688 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6689 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6690 hfi1_rcvctrl(dd, rcvmask, i);
6691 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006692
6693 /* enable port */
6694 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6695}
6696
6697/*
6698 * Non-interrupt SPC freeze handling.
6699 *
6700 * This is a work-queue function outside of the triggering interrupt.
6701 */
6702void handle_freeze(struct work_struct *work)
6703{
6704 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6705 freeze_work);
6706 struct hfi1_devdata *dd = ppd->dd;
6707
6708 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006709 wait_for_freeze_status(dd, 1);
6710
6711 /* SPC is now frozen */
6712
6713 /* do send PIO freeze steps */
6714 pio_freeze(dd);
6715
6716 /* do send DMA freeze steps */
6717 sdma_freeze(dd);
6718
6719 /* do send egress freeze steps - nothing to do */
6720
6721 /* do receive freeze steps */
6722 rxe_freeze(dd);
6723
6724 /*
6725 * Unfreeze the hardware - clear the freeze, wait for each
6726 * block's frozen bit to clear, then clear the frozen flag.
6727 */
6728 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6729 wait_for_freeze_status(dd, 0);
6730
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006731 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006732 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6733 wait_for_freeze_status(dd, 1);
6734 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6735 wait_for_freeze_status(dd, 0);
6736 }
6737
6738 /* do send PIO unfreeze steps for kernel contexts */
6739 pio_kernel_unfreeze(dd);
6740
6741 /* do send DMA unfreeze steps */
6742 sdma_unfreeze(dd);
6743
6744 /* do send egress unfreeze steps - nothing to do */
6745
6746 /* do receive unfreeze steps for kernel contexts */
6747 rxe_kernel_unfreeze(dd);
6748
6749 /*
6750 * The unfreeze procedure touches global device registers when
6751 * it disables and re-enables RXE. Mark the device unfrozen
6752 * after all that is done so other parts of the driver waiting
6753 * for the device to unfreeze don't do things out of order.
6754 *
6755 * The above implies that the meaning of HFI1_FROZEN flag is
6756 * "Device has gone into freeze mode and freeze mode handling
6757 * is still in progress."
6758 *
6759 * The flag will be removed when freeze mode processing has
6760 * completed.
6761 */
6762 dd->flags &= ~HFI1_FROZEN;
6763 wake_up(&dd->event_queue);
6764
6765 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006766}
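
/*
 * Illustrative sketch - not part of the original file. Code elsewhere
 * in the driver that must wait for freeze handling to complete can
 * sleep on dd->event_queue until HFI1_FROZEN clears, matching the
 * wake_up() above; the timeout value here is an arbitrary assumption.
 */
static int __maybe_unused example_wait_unfrozen(struct hfi1_devdata *dd)
{
	if (!wait_event_timeout(dd->event_queue,
				!(dd->flags & HFI1_FROZEN),
				msecs_to_jiffies(500)))
		return -ETIMEDOUT;
	return 0;
}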
6767
6768/*
6769 * Handle a link up interrupt from the 8051.
6770 *
6771 * This is a work-queue function outside of the interrupt.
6772 */
6773void handle_link_up(struct work_struct *work)
6774{
6775 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Jubin John17fb4f22016-02-14 20:21:52 -08006776 link_up_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006777 set_link_state(ppd, HLS_UP_INIT);
6778
6779 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6780 read_ltp_rtt(ppd->dd);
6781 /*
6782 * OPA specifies that certain counters are cleared on a transition
6783 * to link up, so do that.
6784 */
6785 clear_linkup_counters(ppd->dd);
6786 /*
6787 * And (re)set link up default values.
6788 */
6789 set_linkup_defaults(ppd);
6790
6791 /* enforce link speed enabled */
6792 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6793 /* oops - current speed is not enabled, bounce */
6794 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006795 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6796 ppd->link_speed_active, ppd->link_speed_enabled);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006797 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08006798 OPA_LINKDOWN_REASON_SPEED_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006799 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006800 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006801 start_link(ppd);
6802 }
6803}
6804
Jubin John4d114fd2016-02-14 20:21:43 -08006805/*
6806 * Several pieces of LNI information were cached for SMA in ppd.
6807 * Reset these on link down
6808 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006809static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6810{
6811 ppd->neighbor_guid = 0;
6812 ppd->neighbor_port_number = 0;
6813 ppd->neighbor_type = 0;
6814 ppd->neighbor_fm_security = 0;
6815}
6816
Dean Luickfeb831d2016-04-14 08:31:36 -07006817static const char * const link_down_reason_strs[] = {
6818 [OPA_LINKDOWN_REASON_NONE] = "None",
6819	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6820 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6821 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6822 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6823 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6824 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6825 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6826 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6827 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6828 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6829 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6830 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6831 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6832 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6833 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6834 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6835 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6836 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6837 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6838 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6839 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6840 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6841 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6842 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6843 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6844 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6845 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6846 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6847 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6848 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6849 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6850 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6851 "Excessive buffer overrun",
6852 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6853 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6854 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6855 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6856 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6857 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6858 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6859 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6860 "Local media not installed",
6861 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6862 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6863 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6864 "End to end not installed",
6865 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6866 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6867 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6868 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6869 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6870 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6871};
6872
6873/* return the neighbor link down reason string */
6874static const char *link_down_reason_str(u8 reason)
6875{
6876 const char *str = NULL;
6877
6878 if (reason < ARRAY_SIZE(link_down_reason_strs))
6879 str = link_down_reason_strs[reason];
6880 if (!str)
6881 str = "(invalid)";
6882
6883 return str;
6884}
6885
Mike Marciniszyn77241052015-07-30 15:17:43 -04006886/*
6887 * Handle a link down interrupt from the 8051.
6888 *
6889 * This is a work-queue function outside of the interrupt.
6890 */
6891void handle_link_down(struct work_struct *work)
6892{
6893 u8 lcl_reason, neigh_reason = 0;
Dean Luickfeb831d2016-04-14 08:31:36 -07006894 u8 link_down_reason;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006895 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Dean Luickfeb831d2016-04-14 08:31:36 -07006896 link_down_work);
6897 int was_up;
6898 static const char ldr_str[] = "Link down reason: ";
Mike Marciniszyn77241052015-07-30 15:17:43 -04006899
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006900 if ((ppd->host_link_state &
6901 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6902 ppd->port_type == PORT_TYPE_FIXED)
6903 ppd->offline_disabled_reason =
6904 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6905
6906 /* Go offline first, then deal with reading/writing through 8051 */
Dean Luickfeb831d2016-04-14 08:31:36 -07006907 was_up = !!(ppd->host_link_state & HLS_UP);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006908 set_link_state(ppd, HLS_DN_OFFLINE);
6909
Dean Luickfeb831d2016-04-14 08:31:36 -07006910 if (was_up) {
6911 lcl_reason = 0;
6912 /* link down reason is only valid if the link was up */
6913 read_link_down_reason(ppd->dd, &link_down_reason);
6914 switch (link_down_reason) {
6915 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6916 /* the link went down, no idle message reason */
6917 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6918 ldr_str);
6919 break;
6920 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6921 /*
6922 * The neighbor reason is only valid if an idle message
6923 * was received for it.
6924 */
6925 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6926 dd_dev_info(ppd->dd,
6927 "%sNeighbor link down message %d, %s\n",
6928 ldr_str, neigh_reason,
6929 link_down_reason_str(neigh_reason));
6930 break;
6931 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6932 dd_dev_info(ppd->dd,
6933 "%sHost requested link to go offline\n",
6934 ldr_str);
6935 break;
6936 default:
6937 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6938 ldr_str, link_down_reason);
6939 break;
6940 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006941
Dean Luickfeb831d2016-04-14 08:31:36 -07006942 /*
6943 * If no reason, assume peer-initiated but missed
6944 * LinkGoingDown idle flits.
6945 */
6946 if (neigh_reason == 0)
6947 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6948 } else {
6949 /* went down while polling or going up */
6950 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6951 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006952
6953 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6954
Dean Luick015e91f2016-04-14 08:31:42 -07006955 /* inform the SMA when the link transitions from up to down */
6956 if (was_up && ppd->local_link_down_reason.sma == 0 &&
6957 ppd->neigh_link_down_reason.sma == 0) {
6958 ppd->local_link_down_reason.sma =
6959 ppd->local_link_down_reason.latest;
6960 ppd->neigh_link_down_reason.sma =
6961 ppd->neigh_link_down_reason.latest;
6962 }
6963
Mike Marciniszyn77241052015-07-30 15:17:43 -04006964 reset_neighbor_info(ppd);
6965
6966 /* disable the port */
6967 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6968
Jubin John4d114fd2016-02-14 20:21:43 -08006969 /*
6970 * If there is no cable attached, turn the DC off. Otherwise,
6971 * start the link bring up.
6972 */
Easwar Hariharan623bba22016-04-12 11:25:57 -07006973 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006974 dc_shutdown(ppd->dd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006975 } else {
6976 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006977 start_link(ppd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006978 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006979}
6980
6981void handle_link_bounce(struct work_struct *work)
6982{
6983 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6984 link_bounce_work);
6985
6986 /*
6987 * Only do something if the link is currently up.
6988 */
6989 if (ppd->host_link_state & HLS_UP) {
6990 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006991 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006992 start_link(ppd);
6993 } else {
6994 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006995 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006996 }
6997}
6998
6999/*
7000 * Mask conversion: Capability exchange to Port LTP. The capability
7001 * exchange has an implicit 16b CRC that is mandatory.
7002 */
7003static int cap_to_port_ltp(int cap)
7004{
7005 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7006
7007 if (cap & CAP_CRC_14B)
7008 port_ltp |= PORT_LTP_CRC_MODE_14;
7009 if (cap & CAP_CRC_48B)
7010 port_ltp |= PORT_LTP_CRC_MODE_48;
7011 if (cap & CAP_CRC_12B_16B_PER_LANE)
7012 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7013
7014 return port_ltp;
7015}
7016
7017/*
7018 * Convert an OPA Port LTP mask to capability mask
7019 */
7020int port_ltp_to_cap(int port_ltp)
7021{
7022 int cap_mask = 0;
7023
7024 if (port_ltp & PORT_LTP_CRC_MODE_14)
7025 cap_mask |= CAP_CRC_14B;
7026 if (port_ltp & PORT_LTP_CRC_MODE_48)
7027 cap_mask |= CAP_CRC_48B;
7028 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7029 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7030
7031 return cap_mask;
7032}
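
/*
 * Worked example (illustrative, not in the original): for
 * cap == CAP_CRC_14B | CAP_CRC_48B,
 *
 *	cap_to_port_ltp(cap) == PORT_LTP_CRC_MODE_16 |
 *				PORT_LTP_CRC_MODE_14 |
 *				PORT_LTP_CRC_MODE_48
 *
 * and feeding that result to port_ltp_to_cap() returns the original
 * cap bits; the mandatory 16b mode has no capability bit, so it
 * simply drops out on the way back.
 */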
7033
7034/*
7035 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7036 */
7037static int lcb_to_port_ltp(int lcb_crc)
7038{
7039 int port_ltp = 0;
7040
7041 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7042 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7043 else if (lcb_crc == LCB_CRC_48B)
7044 port_ltp = PORT_LTP_CRC_MODE_48;
7045 else if (lcb_crc == LCB_CRC_14B)
7046 port_ltp = PORT_LTP_CRC_MODE_14;
7047 else
7048 port_ltp = PORT_LTP_CRC_MODE_16;
7049
7050 return port_ltp;
7051}
7052
7053/*
7054 * Our neighbor has indicated that we are allowed to act as a fabric
7055 * manager, so place the full management partition key in the second
7056 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7057 * that we should already have the limited management partition key in
7058 * array element 1, and also that the port is not yet up when
7059 * add_full_mgmt_pkey() is invoked.
7060 */
7061static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7062{
7063 struct hfi1_devdata *dd = ppd->dd;
7064
Dean Luick87645222015-12-01 15:38:21 -05007065	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7066 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7067 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7068 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007069 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7070 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7071}
7072
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007073static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007074{
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007075 if (ppd->pkeys[2] != 0) {
7076 ppd->pkeys[2] = 0;
7077 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7078 }
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007079}
7080
Mike Marciniszyn77241052015-07-30 15:17:43 -04007081/*
7082 * Convert the given link width to the OPA link width bitmask.
7083 */
7084static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7085{
7086 switch (width) {
7087 case 0:
7088 /*
7089 * Simulator and quick linkup do not set the width.
7090 * Just set it to 4x without complaint.
7091 */
7092 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7093 return OPA_LINK_WIDTH_4X;
7094 return 0; /* no lanes up */
7095 case 1: return OPA_LINK_WIDTH_1X;
7096 case 2: return OPA_LINK_WIDTH_2X;
7097 case 3: return OPA_LINK_WIDTH_3X;
7098 default:
7099 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007100 __func__, width);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007101 /* fall through */
7102 case 4: return OPA_LINK_WIDTH_4X;
7103 }
7104}
7105
7106/*
7107 * Do a population count on the bottom nibble.
7108 */
7109static const u8 bit_counts[16] = {
7110 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7111};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007112
Mike Marciniszyn77241052015-07-30 15:17:43 -04007113static inline u8 nibble_to_count(u8 nibble)
7114{
7115 return bit_counts[nibble & 0xf];
7116}
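
/*
 * Equivalence note (an assumption, not in the original): the lookup
 * above matches the kernel's hweight8() on the low nibble, i.e.
 * nibble_to_count(n) == hweight8(n & 0xf); a 16-entry table keeps the
 * conversion a single memory access.
 */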
7117
7118/*
7119 * Read the active lane information from the 8051 registers and return
7120 * their widths.
7121 *
7122 * Active lane information is found in these 8051 registers:
7123 * enable_lane_tx
7124 * enable_lane_rx
7125 */
7126static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7127 u16 *rx_width)
7128{
7129 u16 tx, rx;
7130 u8 enable_lane_rx;
7131 u8 enable_lane_tx;
7132 u8 tx_polarity_inversion;
7133 u8 rx_polarity_inversion;
7134 u8 max_rate;
7135
7136 /* read the active lanes */
7137 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08007138 &rx_polarity_inversion, &max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007139 read_local_lni(dd, &enable_lane_rx);
7140
7141 /* convert to counts */
7142 tx = nibble_to_count(enable_lane_tx);
7143 rx = nibble_to_count(enable_lane_rx);
7144
7145 /*
7146 * Set link_speed_active here, overriding what was set in
7147 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7148 * set the max_rate field in handle_verify_cap until v0.19.
7149 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007150 if ((dd->icode == ICODE_RTL_SILICON) &&
7151 (dd->dc8051_ver < dc8051_ver(0, 19))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007152 /* max_rate: 0 = 12.5G, 1 = 25G */
7153 switch (max_rate) {
7154 case 0:
7155 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7156 break;
7157 default:
7158 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007159 "%s: unexpected max rate %d, using 25Gb\n",
7160 __func__, (int)max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007161 /* fall through */
7162 case 1:
7163 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7164 break;
7165 }
7166 }
7167
7168 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007169 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7170 enable_lane_tx, tx, enable_lane_rx, rx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007171 *tx_width = link_width_to_bits(dd, tx);
7172 *rx_width = link_width_to_bits(dd, rx);
7173}
7174
7175/*
7176 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7177 * Valid after the end of VerifyCap and during LinkUp. Does not change
7178 * after link up. I.e. look elsewhere for downgrade information.
7179 *
7180 * Bits are:
7181 * + bits [7:4] contain the number of active transmitters
7182 * + bits [3:0] contain the number of active receivers
7183 * These are numbers 1 through 4 and can be different values if the
7184 * link is asymmetric.
7185 *
7186 * verify_cap_local_fm_link_width[0] retains its original value.
7187 */
7188static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7189 u16 *rx_width)
7190{
7191 u16 widths, tx, rx;
7192 u8 misc_bits, local_flags;
7193 u16 active_tx, active_rx;
7194
7195 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7196 tx = widths >> 12;
7197 rx = (widths >> 8) & 0xf;
7198
7199 *tx_width = link_width_to_bits(dd, tx);
7200 *rx_width = link_width_to_bits(dd, rx);
7201
7202 /* print the active widths */
7203 get_link_widths(dd, &active_tx, &active_rx);
7204}
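
/*
 * Worked example (illustrative, not in the original): for
 * widths == 0x3400, tx == 0x3 and rx == 0x4, which map via
 * link_width_to_bits() to OPA_LINK_WIDTH_3X and OPA_LINK_WIDTH_4X;
 * the two fields can differ if the link is asymmetric.
 */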
7205
7206/*
7207 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7208 * hardware information when the link first comes up.
7209 *
7210 * The link width is not available until after VerifyCap.AllFramesReceived
7211 * (the trigger for handle_verify_cap), so this is outside that routine
7212 * and should be called when the 8051 signals linkup.
7213 */
7214void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7215{
7216 u16 tx_width, rx_width;
7217
7218 /* get end-of-LNI link widths */
7219 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7220
7221 /* use tx_width as the link is supposed to be symmetric on link up */
7222 ppd->link_width_active = tx_width;
7223 /* link width downgrade active (LWD.A) starts out matching LW.A */
7224 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7225 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7226 /* per OPA spec, on link up LWD.E resets to LWD.S */
7227 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7228	/* cache the active egress rate (units of 10^6 bits/sec) */
7229 ppd->current_egress_rate = active_egress_rate(ppd);
7230}
7231
7232/*
7233 * Handle a verify capabilities interrupt from the 8051.
7234 *
7235 * This is a work-queue function outside of the interrupt.
7236 */
7237void handle_verify_cap(struct work_struct *work)
7238{
7239 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7240 link_vc_work);
7241 struct hfi1_devdata *dd = ppd->dd;
7242 u64 reg;
7243 u8 power_management;
7244	u8 continuous;
7245 u8 vcu;
7246 u8 vau;
7247 u8 z;
7248 u16 vl15buf;
7249 u16 link_widths;
7250 u16 crc_mask;
7251 u16 crc_val;
7252 u16 device_id;
7253 u16 active_tx, active_rx;
7254 u8 partner_supported_crc;
7255 u8 remote_tx_rate;
7256 u8 device_rev;
7257
7258 set_link_state(ppd, HLS_VERIFY_CAP);
7259
7260 lcb_shutdown(dd, 0);
7261 adjust_lcb_for_fpga_serdes(dd);
7262
7263 /*
7264 * These are now valid:
7265 * remote VerifyCap fields in the general LNI config
7266 * CSR DC8051_STS_REMOTE_GUID
7267 * CSR DC8051_STS_REMOTE_NODE_TYPE
7268 * CSR DC8051_STS_REMOTE_FM_SECURITY
7269 * CSR DC8051_STS_REMOTE_PORT_NO
7270 */
7271
7272	read_vc_remote_phy(dd, &power_management, &continuous);
Jubin John17fb4f22016-02-14 20:21:52 -08007273 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7274 &partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007275 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7276 read_remote_device_id(dd, &device_id, &device_rev);
7277 /*
7278 * And the 'MgmtAllowed' information, which is exchanged during
7279	 * LNI, is also available at this point.
7280 */
7281 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7282 /* print the active widths */
7283 get_link_widths(dd, &active_tx, &active_rx);
7284 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007285 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7286 (int)power_management, (int)continious);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007287 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007288 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7289 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7290 (int)partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007291 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007292 (u32)remote_tx_rate, (u32)link_widths);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007293 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007294 (u32)device_id, (u32)device_rev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007295 /*
7296 * The peer vAU value just read is the peer receiver value. HFI does
7297 * not support a transmit vAU of 0 (AU == 8). We advertised that
7298 * with Z=1 in the fabric capabilities sent to the peer. The peer
7299 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7300 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7301 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7302 * subject to the Z value exception.
7303 */
7304 if (vau == 0)
7305 vau = 1;
7306 set_up_vl15(dd, vau, vl15buf);
7307
7308 /* set up the LCB CRC mode */
7309 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7310
7311 /* order is important: use the lowest bit in common */
7312 if (crc_mask & CAP_CRC_14B)
7313 crc_val = LCB_CRC_14B;
7314 else if (crc_mask & CAP_CRC_48B)
7315 crc_val = LCB_CRC_48B;
7316 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7317 crc_val = LCB_CRC_12B_16B_PER_LANE;
7318 else
7319 crc_val = LCB_CRC_16B;
7320
7321 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7322 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7323 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7324
7325 /* set (14b only) or clear sideband credit */
7326 reg = read_csr(dd, SEND_CM_CTRL);
7327 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7328 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007329 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007330 } else {
7331 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007332 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007333 }
7334
7335 ppd->link_speed_active = 0; /* invalid value */
7336 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7337 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7338 switch (remote_tx_rate) {
7339 case 0:
7340 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7341 break;
7342 case 1:
7343 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7344 break;
7345 }
7346 } else {
7347 /* actual rate is highest bit of the ANDed rates */
7348 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7349
7350 if (rate & 2)
7351 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7352 else if (rate & 1)
7353 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7354 }
7355 if (ppd->link_speed_active == 0) {
7356 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007357 __func__, (int)remote_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007358 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7359 }
7360
7361 /*
7362 * Cache the values of the supported, enabled, and active
7363 * LTP CRC modes to return in 'portinfo' queries. But the bit
7364 * flags that are returned in the portinfo query differ from
7365 * what's in the link_crc_mask, crc_sizes, and crc_val
7366 * variables. Convert these here.
7367 */
7368 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7369 /* supported crc modes */
7370 ppd->port_ltp_crc_mode |=
7371 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7372 /* enabled crc modes */
7373 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7374 /* active crc mode */
7375
7376 /* set up the remote credit return table */
7377 assign_remote_cm_au_table(dd, vcu);
7378
7379 /*
7380 * The LCB is reset on entry to handle_verify_cap(), so this must
7381 * be applied on every link up.
7382 *
7383 * Adjust LCB error kill enable to kill the link if
7384 * these RBUF errors are seen:
7385 * REPLAY_BUF_MBE_SMASK
7386 * FLIT_INPUT_BUF_MBE_SMASK
7387 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007388 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007389 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7390 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7391 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7392 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7393 }
7394
7395 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7396 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7397
7398 /* give 8051 access to the LCB CSRs */
7399 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7400 set_8051_lcb_access(dd);
7401
7402 ppd->neighbor_guid =
7403 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7404 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7405 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7406 ppd->neighbor_type =
7407 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7408 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7409 ppd->neighbor_fm_security =
7410 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7411 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7412 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007413 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7414 ppd->neighbor_guid, ppd->neighbor_type,
7415 ppd->mgmt_allowed, ppd->neighbor_fm_security);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007416 if (ppd->mgmt_allowed)
7417 add_full_mgmt_pkey(ppd);
7418
7419 /* tell the 8051 to go to LinkUp */
7420 set_link_state(ppd, HLS_GOING_UP);
7421}
7422
7423/*
7424 * Apply the link width downgrade enabled policy against the current active
7425 * link widths.
7426 *
7427 * Called when the enabled policy changes or the active link widths change.
7428 */
7429void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7430{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007431 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007432 int tries;
7433 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007434 u16 tx, rx;
7435
Dean Luick323fd782015-11-16 21:59:24 -05007436 /* use the hls lock to avoid a race with actual link up */
7437 tries = 0;
7438retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007439 mutex_lock(&ppd->hls_lock);
7440 /* only apply if the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07007441 if (ppd->host_link_state & HLS_DOWN) {
Dean Luick323fd782015-11-16 21:59:24 -05007442		/* still going up... wait and retry */
7443 if (ppd->host_link_state & HLS_GOING_UP) {
7444 if (++tries < 1000) {
7445 mutex_unlock(&ppd->hls_lock);
7446 usleep_range(100, 120); /* arbitrary */
7447 goto retry;
7448 }
7449 dd_dev_err(ppd->dd,
7450 "%s: giving up waiting for link state change\n",
7451 __func__);
7452 }
7453 goto done;
7454 }

	lwde = ppd->link_width_downgrade_enabled;

	if (refresh_widths) {
		get_link_widths(ppd->dd, &tx, &rx);
		ppd->link_width_downgrade_tx_active = tx;
		ppd->link_width_downgrade_rx_active = rx;
	}

	if (ppd->link_width_downgrade_tx_active == 0 ||
	    ppd->link_width_downgrade_rx_active == 0) {
		/* the 8051 reported a dead link as a downgrade */
		dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
	} else if (lwde == 0) {
		/* downgrade is disabled */

		/* bounce if not at starting active width */
		if ((ppd->link_width_active !=
		     ppd->link_width_downgrade_tx_active) ||
		    (ppd->link_width_active !=
		     ppd->link_width_downgrade_rx_active)) {
			dd_dev_err(ppd->dd,
				   "Link downgrade is disabled and link has downgraded, downing link\n");
			dd_dev_err(ppd->dd,
				   " original 0x%x, tx active 0x%x, rx active 0x%x\n",
				   ppd->link_width_active,
				   ppd->link_width_downgrade_tx_active,
				   ppd->link_width_downgrade_rx_active);
			do_bounce = 1;
		}
	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
		/* Tx or Rx is outside the enabled policy */
		dd_dev_err(ppd->dd,
			   "Link is outside of downgrade allowed, downing link\n");
		dd_dev_err(ppd->dd,
			   " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
			   lwde, ppd->link_width_downgrade_tx_active,
			   ppd->link_width_downgrade_rx_active);
		do_bounce = 1;
	}

done:
	mutex_unlock(&ppd->hls_lock);

	if (do_bounce) {
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
				     OPA_LINKDOWN_REASON_WIDTH_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		tune_serdes(ppd);
		start_link(ppd);
	}
}

/*
 * Handle a link downgrade interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_downgrade(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_downgrade_work);

	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
	apply_link_downgrade_policy(ppd, 1);
}

static char *dcc_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dcc_err_flags,
			   ARRAY_SIZE(dcc_err_flags));
}

static char *lcb_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, lcb_err_flags,
			   ARRAY_SIZE(lcb_err_flags));
}

static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_err_flags,
			   ARRAY_SIZE(dc8051_err_flags));
}

static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
			   ARRAY_SIZE(dc8051_info_err_flags));
}

static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
			   ARRAY_SIZE(dc8051_info_host_msg_flags));
}

static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 info, err, host_msg;
	int queue_link_down = 0;
	char buf[96];

	/* look at the flags */
	if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
		/* 8051 information set by firmware */
		/* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
		err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
		host_msg = (info >>
			DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;

		/*
		 * Handle error flags.
		 */
		if (err & FAILED_LNI) {
			/*
			 * LNI error indications are cleared by the 8051
			 * only when starting polling.  Only pay attention
			 * to them when in the states that occur during
			 * LNI.
			 */
			if (ppd->host_link_state
			    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
				queue_link_down = 1;
				dd_dev_info(dd, "Link error: %s\n",
					    dc8051_info_err_string(buf,
								   sizeof(buf),
								   err &
								   FAILED_LNI));
			}
			err &= ~(u64)FAILED_LNI;
		}
		/* unknown frames can happen during LNI, just count */
		if (err & UNKNOWN_FRAME) {
			ppd->unknown_frame_count++;
			err &= ~(u64)UNKNOWN_FRAME;
		}
		if (err) {
			/* report remaining errors, but do not do anything */
			dd_dev_err(dd, "8051 info error: %s\n",
				   dc8051_info_err_string(buf, sizeof(buf),
							  err));
		}

		/*
		 * Handle host message flags.
		 */
		if (host_msg & HOST_REQ_DONE) {
			/*
			 * Presently, the driver does a busy wait for
			 * host requests to complete.  This is only an
			 * informational message.
			 * NOTE: The 8051 clears the host message
			 * information *on the next 8051 command*.
			 * Therefore, when linkup is achieved,
			 * this flag will still be set.
			 */
			host_msg &= ~(u64)HOST_REQ_DONE;
		}
		if (host_msg & BC_SMA_MSG) {
			queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
			host_msg &= ~(u64)BC_SMA_MSG;
		}
		if (host_msg & LINKUP_ACHIEVED) {
			dd_dev_info(dd, "8051: Link up\n");
			queue_work(ppd->hfi1_wq, &ppd->link_up_work);
			host_msg &= ~(u64)LINKUP_ACHIEVED;
		}
		if (host_msg & EXT_DEVICE_CFG_REQ) {
			handle_8051_request(ppd);
			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
		}
		if (host_msg & VERIFY_CAP_FRAME) {
			queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
			host_msg &= ~(u64)VERIFY_CAP_FRAME;
		}
		if (host_msg & LINK_GOING_DOWN) {
			const char *extra = "";
			/* no downgrade action needed if going down */
			if (host_msg & LINK_WIDTH_DOWNGRADED) {
				host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
				extra = " (ignoring downgrade)";
			}
			dd_dev_info(dd, "8051: Link down%s\n", extra);
			queue_link_down = 1;
			host_msg &= ~(u64)LINK_GOING_DOWN;
		}
		if (host_msg & LINK_WIDTH_DOWNGRADED) {
			queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
			host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
		}
		if (host_msg) {
			/* report remaining messages, but do not do anything */
			dd_dev_info(dd, "8051 info host message: %s\n",
				    dc8051_info_host_msg_string(buf,
								sizeof(buf),
								host_msg));
		}

		reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
	}
	if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
		/*
		 * Lost the 8051 heartbeat.  If this happens, we
		 * receive constant interrupts about it.  Disable
		 * the interrupt after the first.
		 */
		dd_dev_err(dd, "Lost 8051 heartbeat\n");
		write_csr(dd, DC_DC8051_ERR_EN,
			  read_csr(dd, DC_DC8051_ERR_EN) &
			  ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);

		reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
	}
	if (reg) {
		/* report the error, but do not do anything */
		dd_dev_err(dd, "8051 error: %s\n",
			   dc8051_err_string(buf, sizeof(buf), reg));
	}

	if (queue_link_down) {
		/*
		 * if the link is already going down or disabled, do not
		 * queue another
		 */
		if ((ppd->host_link_state &
		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
		    ppd->link_enabled == 0) {
			dd_dev_info(dd, "%s: not queuing link down\n",
				    __func__);
		} else {
			queue_work(ppd->hfi1_wq, &ppd->link_down_work);
		}
	}
}

static const char * const fm_config_txt[] = {
[0] =
	"BadHeadDist: Distance violation between two head flits",
[1] =
	"BadTailDist: Distance violation between two tail flits",
[2] =
	"BadCtrlDist: Distance violation between two credit control flits",
[3] =
	"BadCrdAck: Credits return for unsupported VL",
[4] =
	"UnsupportedVLMarker: Received VL Marker",
[5] =
	"BadPreempt: Exceeded the preemption nesting level",
[6] =
	"BadControlFlit: Received unsupported control flit",
/* no 7 */
[8] =
	"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
};

static const char * const port_rcv_txt[] = {
[1] =
	"BadPktLen: Illegal PktLen",
[2] =
	"PktLenTooLong: Packet longer than PktLen",
[3] =
	"PktLenTooShort: Packet shorter than PktLen",
[4] =
	"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
[5] =
	"BadDLID: Illegal DLID (0, doesn't match HFI)",
[6] =
	"BadL2: Illegal L2 opcode",
[7] =
	"BadSC: Unsupported SC",
[9] =
	"BadRC: Illegal RC",
[11] =
	"PreemptError: Preempting with same VL",
[12] =
	"PreemptVL15: Preempting a VL15 packet",
};

#define OPA_LDR_FMCONFIG_OFFSET 16
#define OPA_LDR_PORTRCV_OFFSET 0
static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 info, hdr0, hdr1;
	const char *extra;
	char buf[96];
	struct hfi1_pportdata *ppd = dd->pport;
	u8 lcl_reason = 0;
	int do_bounce = 0;

	if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
		}
		reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
		struct hfi1_pportdata *ppd = dd->pport;
		/* this counter saturates at (2^32) - 1 */
		if (ppd->link_downed < (u32)UINT_MAX)
			ppd->link_downed++;
		reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
		}
		switch (info) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
			extra = fm_config_txt[info];
			break;
		case 8:
			extra = fm_config_txt[info];
			if (ppd->port_error_action &
			    OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
				do_bounce = 1;
				/*
				 * lcl_reason cannot be derived from info
				 * for this error
				 */
				lcl_reason =
				  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
			}
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
					(1 << (OPA_LDR_FMCONFIG_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
		}

		/* just report this */
		dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
		reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
		if (!(dd->err_info_rcvport.status_and_code &
		      OPA_EI_STATUS_SMASK)) {
			dd->err_info_rcvport.status_and_code =
				info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_rcvport.status_and_code |=
				OPA_EI_STATUS_SMASK;
			/*
			 * save first 2 flits in the packet that caused
			 * the error
			 */
			dd->err_info_rcvport.packet_flit1 = hdr0;
			dd->err_info_rcvport.packet_flit2 = hdr1;
		}
		switch (info) {
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
		case 9:
		case 11:
		case 12:
			extra = port_rcv_txt[info];
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
					(1 << (OPA_LDR_PORTRCV_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
		}

		/* just report this */
		dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
		dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
			    hdr0, hdr1);

		reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
		/* informative only */
		dd_dev_info(dd, "8051 access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
	}
	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
		/* informative only */
		dd_dev_info(dd, "host access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
	}

	/* report any remaining errors */
	if (reg)
		dd_dev_info(dd, "DCC Error: %s\n",
			    dcc_err_string(buf, sizeof(buf), reg));

	if (lcl_reason == 0)
		lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;

	if (do_bounce) {
		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
	}
}

static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];

	dd_dev_info(dd, "LCB Error: %s\n",
		    lcb_err_string(buf, sizeof(buf), reg));
}

/*
 * CCE block DC interrupt.  Source is < 8.
 */
static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &dc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else if (source == 3 /* dc_lbm_int */) {
		/*
		 * This indicates that a parity error has occurred on the
		 * address/control lines presented to the LBM.  The error
		 * is a single pulse, there is no associated error flag,
		 * and it is non-maskable.  This is because if a parity
		 * error occurs on the request the request is dropped.
		 * This should never occur, but it is nice to know if it
		 * ever does.
		 */
		dd_dev_err(dd, "Parity error in DC LBM block\n");
	} else {
		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
	}
}

/*
 * TX block send credit interrupt.  Source is < 160.
 */
static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
{
	sc_group_release_update(dd, source);
}

/*
 * TX block SDMA interrupt.  Source is < 48.
 *
 * SDMA interrupts are grouped by type:
 *
 *	 0 -  N-1 = SDma
 *	 N - 2N-1 = SDmaProgress
 *	2N - 3N-1 = SDmaIdle
 */
static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
{
	/* what interrupt */
	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(&dd->per_sdma[which]);
#endif

	if (likely(what < 3 && which < dd->num_sdma)) {
		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
	} else {
		/* should not happen */
		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
	}
}

/*
 * RX block receive available interrupt.  Source is < 160.
 */
static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
{
	struct hfi1_ctxtdata *rcd;
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = dd->rcd[source];
		if (rcd) {
			if (source < dd->first_user_ctxt)
				rcd->do_interrupt(rcd, 0);
			else
				handle_user_interrupt(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
		err_detail = "dataless";
	} else {
		/* received an interrupt, but are not using that context */
		err_detail = "out of range";
	}
	dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
		   err_detail, source);
}

/*
 * RX block receive urgent interrupt.  Source is < 160.
 */
static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
{
	struct hfi1_ctxtdata *rcd;
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = dd->rcd[source];
		if (rcd) {
			/* only pay attention to user urgent interrupts */
			if (source >= dd->first_user_ctxt)
				handle_user_interrupt(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
		err_detail = "dataless";
	} else {
		/* received an interrupt, but are not using that context */
		err_detail = "out of range";
	}
	dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
		   err_detail, source);
}

/*
 * Reserved range interrupt.  Should not be called in normal operation.
 */
static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
{
	char name[64];

	dd_dev_err(dd, "unexpected %s interrupt\n",
		   is_reserved_name(name, sizeof(name), source));
}

static const struct is_table is_table[] = {
/*
 * start		end
 *			name func		interrupt func
 */
{ IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
			is_misc_err_name,	is_misc_err_int },
{ IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
			is_sdma_eng_err_name,	is_sdma_eng_err_int },
{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
			is_sendctxt_err_name,	is_sendctxt_err_int },
{ IS_SDMA_START,	 IS_SDMA_END,
			is_sdma_eng_name,	is_sdma_eng_int },
{ IS_VARIOUS_START,	 IS_VARIOUS_END,
			is_various_name,	is_various_int },
{ IS_DC_START,		 IS_DC_END,
			is_dc_name,		is_dc_int },
{ IS_RCVAVAIL_START,	 IS_RCVAVAIL_END,
			is_rcv_avail_name,	is_rcv_avail_int },
{ IS_RCVURGENT_START,	 IS_RCVURGENT_END,
			is_rcv_urgent_name,	is_rcv_urgent_int },
{ IS_SENDCREDIT_START,	 IS_SENDCREDIT_END,
			is_send_credit_name,	is_send_credit_int},
{ IS_RESERVED_START,	 IS_RESERVED_END,
			is_reserved_name,	is_reserved_int},
};

/*
 * Interrupt source interrupt - called when the given source has an interrupt.
 * Source is a bit index into an array of 64-bit integers.
 */
static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
{
	const struct is_table *entry;

	/* avoids a double compare by walking the table in-order */
	for (entry = &is_table[0]; entry->is_name; entry++) {
		if (source < entry->end) {
			trace_hfi1_interrupt(dd, entry, source);
			entry->is_int(dd, source - entry->start);
			return;
		}
	}
	/* fell off the end */
	dd_dev_err(dd, "invalid interrupt source %u\n", source);
}

/*
 * General interrupt handler.  This is able to correctly handle
 * all interrupts in case INTx is used.
 */
static irqreturn_t general_interrupt(int irq, void *data)
{
	struct hfi1_devdata *dd = data;
	u64 regs[CCE_NUM_INT_CSRS];
	u32 bit;
	int i;

	this_cpu_inc(*dd->int_counter);

	/* phase 1: scan and clear all handled interrupts */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
		if (dd->gi_mask[i] == 0) {
			regs[i] = 0;	/* used later */
			continue;
		}
		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
				dd->gi_mask[i];
		/* only clear if anything is set */
		if (regs[i])
			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
	}

	/* phase 2: call the appropriate handler */
	for_each_set_bit(bit, (unsigned long *)&regs[0],
			 CCE_NUM_INT_CSRS * 64) {
		is_interrupt(dd, bit);
	}

	return IRQ_HANDLED;
}

static irqreturn_t sdma_interrupt(int irq, void *data)
{
	struct sdma_engine *sde = data;
	struct hfi1_devdata *dd = sde->dd;
	u64 status;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(sde);
#endif

	this_cpu_inc(*dd->int_counter);

	/* This read_csr is really bad in the hot path */
	status = read_csr(dd,
			  CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
			  & sde->imask;
	if (likely(status)) {
		/* clear the interrupt(s) */
		write_csr(dd,
			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
			  status);

		/* handle the interrupt(s) */
		sdma_engine_interrupt(sde, status);
	} else
		dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
			   sde->this_idx);

	return IRQ_HANDLED;
}

8148/*
Dean Luickecd42f82016-02-03 14:35:14 -08008149 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8150 * to insure that the write completed. This does NOT guarantee that
8151 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008152 */
8153static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8154{
8155 struct hfi1_devdata *dd = rcd->dd;
8156 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8157
8158 mmiowb(); /* make sure everything before is written */
8159 write_csr(dd, addr, rcd->imask);
8160 /* force the above write on the chip and get a value back */
8161 (void)read_csr(dd, addr);
8162}
8163
8164/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008165void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008166{
8167 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8168}
8169
Dean Luickecd42f82016-02-03 14:35:14 -08008170/*
8171 * Return non-zero if a packet is present.
8172 *
8173 * This routine is called when rechecking for packets after the RcvAvail
8174 * interrupt has been cleared down. First, do a quick check of memory for
8175 * a packet present. If not found, use an expensive CSR read of the context
8176 * tail to determine the actual tail. The CSR read is necessary because there
8177 * is no method to push pending DMAs to memory other than an interrupt and we
8178 * are trying to determine if we need to force an interrupt.
8179 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008180static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8181{
Dean Luickecd42f82016-02-03 14:35:14 -08008182 u32 tail;
8183 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008184
Dean Luickecd42f82016-02-03 14:35:14 -08008185 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8186 present = (rcd->seq_cnt ==
8187 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8188 else /* is RDMA rtail */
8189 present = (rcd->head != get_rcvhdrtail(rcd));
8190
8191 if (present)
8192 return 1;
8193
8194 /* fall back to a CSR read, correct indpendent of DMA_RTAIL */
8195 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8196 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008197}

/*
 * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
 * This routine will try to handle packets immediately (latency), but if
 * it finds too many, it will invoke the thread handler (bandwidth).  The
 * chip receive interrupt is *not* cleared down until this or the thread (if
 * invoked) is finished.  The intent is to avoid extra interrupts while we
 * are processing packets anyway.
 */
static irqreturn_t receive_context_interrupt(int irq, void *data)
{
	struct hfi1_ctxtdata *rcd = data;
	struct hfi1_devdata *dd = rcd->dd;
	int disposition;
	int present;

	trace_hfi1_receive_interrupt(dd, rcd->ctxt);
	this_cpu_inc(*dd->int_counter);
	aspm_ctx_disable(rcd);

	/* receive interrupt remains blocked while processing packets */
	disposition = rcd->do_interrupt(rcd, 0);

	/*
	 * Too many packets were seen while processing packets in this
	 * IRQ handler.  Invoke the handler thread.  The receive interrupt
	 * remains blocked.
	 */
	if (disposition == RCV_PKT_LIMIT)
		return IRQ_WAKE_THREAD;

	/*
	 * The packet processor detected no more packets.  Clear the receive
	 * interrupt and recheck for a packet that may have arrived
	 * after the previous check and interrupt clear.  If a packet arrived,
	 * force another interrupt.
	 */
	clear_recv_intr(rcd);
	present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);

	return IRQ_HANDLED;
}

/*
 * Receive packet thread handler.  This expects to be invoked with the
 * receive interrupt still blocked.
 */
static irqreturn_t receive_context_thread(int irq, void *data)
{
	struct hfi1_ctxtdata *rcd = data;
	int present;

	/* receive interrupt is still blocked from the IRQ handler */
	(void)rcd->do_interrupt(rcd, 1);

	/*
	 * The packet processor will only return if it detected no more
	 * packets.  Hold IRQs here so we can safely clear the interrupt and
	 * recheck for a packet that may have arrived after the previous
	 * check and the interrupt clear.  If a packet arrived, force another
	 * interrupt.
	 */
	local_irq_disable();
	clear_recv_intr(rcd);
	present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);
	local_irq_enable();

	return IRQ_HANDLED;
}

/* ========================================================================= */

u32 read_physical_state(struct hfi1_devdata *dd)
{
	u64 reg;

	reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
	return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
				& DC_DC8051_STS_CUR_STATE_PORT_MASK;
}

u32 read_logical_state(struct hfi1_devdata *dd)
{
	u64 reg;

	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
	return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
				& DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
}

static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
{
	u64 reg;

	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
	/* clear current state, set new state */
	reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
	reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
}

/*
 * Use the 8051 to read a LCB CSR.
 */
static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	u32 regno;
	int ret;

	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		if (acquire_lcb_access(dd, 0) == 0) {
			*data = read_csr(dd, addr);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}

/*
 * Read an LCB CSR.  Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return read_lcb_via_8051(dd, addr, data);
	/* if going up or down, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
		return -EBUSY;
	/* otherwise, host has access */
	*data = read_csr(dd, addr);
	return 0;
}

/*
 * Use the 8051 to write a LCB CSR.
 */
static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	u32 regno;
	int ret;

	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
	    (dd->dc8051_ver < dc8051_ver(0, 20))) {
		if (acquire_lcb_access(dd, 0) == 0) {
			write_csr(dd, addr, data);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}

/*
 * Write an LCB CSR.  Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return write_lcb_via_8051(dd, addr, data);
	/* if going up or down, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
		return -EBUSY;
	/* otherwise, host has access */
	write_csr(dd, addr, data);
	return 0;
}

/*
 * Returns:
 *	< 0 = Linux error, not able to get access
 *	> 0 = 8051 command RETURN_CODE
 */
static int do_8051_command(
	struct hfi1_devdata *dd,
	u32 type,
	u64 in_data,
	u64 *out_data)
{
	u64 reg, completed;
	int return_code;
	unsigned long flags;
	unsigned long timeout;

	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);

	/*
	 * Alternative to holding the lock for a long time:
	 * - keep busy wait - have other users bounce off
	 */
	spin_lock_irqsave(&dd->dc8051_lock, flags);

	/* We can't send any commands to the 8051 if it's in reset */
	if (dd->dc_shutdown) {
		return_code = -ENODEV;
		goto fail;
	}

	/*
	 * If an 8051 host command timed out previously, then the 8051 is
	 * stuck.
	 *
	 * On first timeout, attempt to reset and restart the entire DC
	 * block (including 8051). (Is this too big of a hammer?)
	 *
	 * If the 8051 times out a second time, the reset did not bring it
	 * back to healthy life. In that case, fail any subsequent commands.
	 */
	if (dd->dc8051_timed_out) {
		if (dd->dc8051_timed_out > 1) {
			dd_dev_err(dd,
				   "Previous 8051 host command timed out, skipping command %u\n",
				   type);
			return_code = -ENXIO;
			goto fail;
		}
		spin_unlock_irqrestore(&dd->dc8051_lock, flags);
		dc_shutdown(dd);
		dc_start(dd);
		spin_lock_irqsave(&dd->dc8051_lock, flags);
	}

	/*
	 * If there is no timeout, then the 8051 command interface is
	 * waiting for a command.
	 */

	/*
	 * When writing a LCB CSR, out_data contains the full value to
	 * be written, while in_data contains the relative LCB
	 * address in 7:0.  Do the work here, rather than the caller,
	 * of distributing the write data to where it needs to go:
	 *
	 * Write data
	 *   39:00 -> in_data[47:8]
	 *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
	 *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
	 */
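	/*
	 * Worked example of the split above (value chosen for illustration
	 * only): for *out_data = 0x123456789abcdef0 and LCB address 0x10,
	 *   in_data           = (0x789abcdef0 << 8) | 0x10  (bits 39:0)
	 *   RETURN_CODE field = 0x56                        (bits 47:40)
	 *   RSP_DATA field    = 0x1234                      (bits 63:48)
	 * which matches the masks and shifts applied just below.
	 */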
	if (type == HCMD_WRITE_LCB_CSR) {
		in_data |= ((*out_data) & 0xffffffffffull) << 8;
		reg = ((((*out_data) >> 40) & 0xff) <<
				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
		      | ((((*out_data) >> 48) & 0xffff) <<
				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
	}

	/*
	 * Do two writes: the first to stabilize the type and req_data, the
	 * second to activate.
	 */
	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);

	/* wait for completion, alternate: interrupt */
	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
	while (1) {
		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
		if (completed)
			break;
		if (time_after(jiffies, timeout)) {
			dd->dc8051_timed_out++;
			dd_dev_err(dd, "8051 host command %u timeout\n", type);
			if (out_data)
				*out_data = 0;
			return_code = -ETIMEDOUT;
			goto fail;
		}
		udelay(2);
	}

	if (out_data) {
		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
		if (type == HCMD_READ_LCB_CSR) {
			/* top 16 bits are in a different register */
			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
				& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
				<< (48
				    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
		}
	}
	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
	dd->dc8051_timed_out = 0;
	/*
	 * Clear command for next user.
	 */
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);

fail:
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);

	return return_code;
}

static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
{
	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}

int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
		     u8 lane_id, u32 config_data)
{
	u64 data;
	int ret;

	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "load 8051 config: field id %d, lane %d, err %d\n",
			   (int)field_id, (int)lane_id, ret);
	}
	return ret;
}

/*
 * Read the 8051 firmware "registers".  Use the RAM directly.  Always
 * set the result, even on error.
 * Return 0 on success, -errno on failure
 */
int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
		     u32 *result)
{
	u64 big_data;
	u32 addr;
	int ret;

	/* address start depends on the lane_id */
	if (lane_id < 4)
		addr = (4 * NUM_GENERAL_FIELDS)
			+ (lane_id * 4 * NUM_LANE_FIELDS);
	else
		addr = 0;
	addr += field_id * 4;

	/* read is in 8-byte chunks, hardware will truncate the address down */
	ret = read_8051_data(dd, addr, 8, &big_data);

	if (ret == 0) {
		/* extract the 4 bytes we want */
		if (addr & 0x4)
			*result = (u32)(big_data >> 32);
		else
			*result = (u32)big_data;
	} else {
		*result = 0;
		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
			   __func__, lane_id, field_id);
	}

	return ret;
}

static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
			      u8 continuous)
{
	u32 frame;

	frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
		| power_management << POWER_MANAGEMENT_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
				GENERAL_CONFIG, frame);
}

static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
				 u16 vl15buf, u8 crc_sizes)
{
	u32 frame;

	frame = (u32)vau << VAU_SHIFT
		| (u32)z << Z_SHIFT
		| (u32)vcu << VCU_SHIFT
		| (u32)vl15buf << VL15BUF_SHIFT
		| (u32)crc_sizes << CRC_SIZES_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
				GENERAL_CONFIG, frame);
}

static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
			 &frame);
	*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
	*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}

static int write_vc_local_link_width(struct hfi1_devdata *dd,
				     u8 misc_bits,
				     u8 flag_bits,
				     u16 link_widths)
{
	u32 frame;

	frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
		| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
		| (u32)link_widths << LINK_WIDTH_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
				frame);
}

static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
				 u8 device_rev)
{
	u32 frame;

	frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
		| ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
}

static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev)
{
	u32 frame;

	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
	*device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
	*device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
			& REMOTE_DEVICE_REV_MASK;
}

void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
{
	u32 frame;

	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
	*ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
	*ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
}

static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
	*power_management = (frame >> POWER_MANAGEMENT_SHIFT)
				& POWER_MANAGEMENT_MASK;
	*continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
				& CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
}

static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
	*vau = (frame >> VAU_SHIFT) & VAU_MASK;
	*z = (frame >> Z_SHIFT) & Z_MASK;
	*vcu = (frame >> VCU_SHIFT) & VCU_MASK;
	*vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
	*crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
}

static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate,
				      u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
			 &frame);
	*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
				& REMOTE_TX_RATE_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}

static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
{
	u32 frame;

	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
	*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
}

static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
{
	u32 frame;

	read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
	*mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
}

static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
{
	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
}

static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
{
	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
}

void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
{
	u32 frame;
	int ret;

	*link_quality = 0;
	if (dd->pport->host_link_state & HLS_UP) {
		ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
				       &frame);
		if (ret == 0)
			*link_quality = (frame >> LINK_QUALITY_SHIFT)
						& LINK_QUALITY_MASK;
	}
}

static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
{
	u32 frame;

	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
	*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
}

static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
{
	u32 frame;

	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
	*ldr = (frame & 0xff);
}

static int read_tx_settings(struct hfi1_devdata *dd,
			    u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion,
			    u8 *max_rate)
{
	u32 frame;
	int ret;

	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
	*enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
				& ENABLE_LANE_TX_MASK;
	*tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
				& TX_POLARITY_INVERSION_MASK;
	*rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
				& RX_POLARITY_INVERSION_MASK;
	*max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
	return ret;
}

static int write_tx_settings(struct hfi1_devdata *dd,
			     u8 enable_lane_tx,
			     u8 tx_polarity_inversion,
			     u8 rx_polarity_inversion,
			     u8 max_rate)
{
	u32 frame;

	/* no need to mask, all variable sizes match field widths */
	frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
		| tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
		| rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
		| max_rate << MAX_RATE_SHIFT;
	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
}

static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
{
	u32 frame, version, prod_id;
	int ret, lane;

	/* 4 lanes */
	for (lane = 0; lane < 4; lane++) {
		ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
		if (ret) {
			dd_dev_err(dd,
				   "Unable to read lane %d firmware details\n",
				   lane);
			continue;
		}
		version = (frame >> SPICO_ROM_VERSION_SHIFT)
					& SPICO_ROM_VERSION_MASK;
		prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
					& SPICO_ROM_PROD_ID_MASK;
		dd_dev_info(dd,
			    "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
			    lane, version, prod_id);
	}
}

/*
 * Read an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
{
	int ret;

	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "read idle message: type %d, err %d\n",
			   (u32)type, ret);
		return -EINVAL;
	}
	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
	/* return only the payload as we already know the type */
	*data_out >>= IDLE_PAYLOAD_SHIFT;
	return 0;
}

/*
 * Read an idle SMA message.  To be done in response to a notification from
 * the 8051.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
{
	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
				 data);
}

/*
 * Send an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int send_idle_message(struct hfi1_devdata *dd, u64 data)
{
	int ret;

	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
			   data, ret);
		return -EINVAL;
	}
	return 0;
}

/*
 * Send an idle SMA message.
 *
 * Returns 0 on success, -EINVAL on error
 */
int send_idle_sma(struct hfi1_devdata *dd, u64 message)
{
	u64 data;

	data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
	       ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
	return send_idle_message(dd, data);
}

/*
 * Initialize the LCB then do a quick link up.  This may or may not be
 * in loopback.
 *
 * return 0 on success, -errno on error
 */
static int do_quick_linkup(struct hfi1_devdata *dd)
{
	u64 reg;
	unsigned long timeout;
	int ret;

	lcb_shutdown(dd, 0);

	if (loopback) {
		/* LCB_CFG_LOOPBACK.VAL = 2 */
		/* LCB_CFG_LANE_WIDTH.VAL = 0 */
		write_csr(dd, DC_LCB_CFG_LOOPBACK,
			  IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
	}

	/* start the LCBs */
	/* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* simulator only loopback steps */
	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		/* LCB_CFG_RUN.EN = 1 */
		write_csr(dd, DC_LCB_CFG_RUN,
			  1ull << DC_LCB_CFG_RUN_EN_SHIFT);

		/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
		timeout = jiffies + msecs_to_jiffies(10);
		while (1) {
			reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
			if (reg)
				break;
			if (time_after(jiffies, timeout)) {
				dd_dev_err(dd,
					   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
				return -ETIMEDOUT;
			}
			udelay(2);
		}

		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
			  1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
	}

	if (!loopback) {
		/*
		 * When doing quick linkup and not in loopback, both
		 * sides must be done with LCB set-up before either
		 * starts the quick linkup.  Put a delay here so that
		 * both sides can be started and have a chance to be
		 * done with LCB set up before resuming.
		 */
		dd_dev_err(dd,
			   "Pausing for peer to be finished with LCB set up\n");
		msleep(5000);
		dd_dev_err(dd, "Continuing with quick linkup\n");
	}

	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
	set_8051_lcb_access(dd);

	/*
	 * State "quick" LinkUp request sets the physical link state to
	 * LinkUp without a verify capability sequence.
	 * This state is in simulator v37 and later.
	 */
	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "%s: set physical link state to quick LinkUp failed with return %d\n",
			   __func__, ret);

		set_host_lcb_access(dd);
		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */

		if (ret >= 0)
			ret = -EINVAL;
		return ret;
	}

	return 0; /* success */
}

/*
 * Set the SerDes to internal loopback mode.
 * Returns 0 on success, -errno on error.
 */
static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
{
	int ret;

	ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
	if (ret == HCMD_SUCCESS)
		return 0;
	dd_dev_err(dd,
		   "Set physical link state to SerDes Loopback failed with return %d\n",
		   ret);
	if (ret >= 0)
		ret = -EINVAL;
	return ret;
}

/*
 * Do all special steps to set up loopback.
 */
static int init_loopback(struct hfi1_devdata *dd)
{
	dd_dev_info(dd, "Entering loopback mode\n");

	/* all loopbacks should disable self GUID check */
	write_csr(dd, DC_DC8051_CFG_MODE,
		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));

	/*
	 * The simulator has only one loopback option - LCB.  Switch
	 * to that option, which includes quick link up.
	 *
	 * Accept all valid loopback values.
	 */
	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
	    (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
	     loopback == LOOPBACK_CABLE)) {
		loopback = LOOPBACK_LCB;
		quick_linkup = 1;
		return 0;
	}

	/* handle serdes loopback */
	if (loopback == LOOPBACK_SERDES) {
		/* internal serdes loopback needs quick linkup on RTL */
		if (dd->icode == ICODE_RTL_SILICON)
			quick_linkup = 1;
		return set_serdes_loopback_mode(dd);
	}

	/* LCB loopback - handled at poll time */
	if (loopback == LOOPBACK_LCB) {
		quick_linkup = 1; /* LCB is always quick linkup */

		/* not supported in emulation due to emulation RTL changes */
		if (dd->icode == ICODE_FPGA_EMULATION) {
			dd_dev_err(dd,
				   "LCB loopback not supported in emulation\n");
			return -EINVAL;
		}
		return 0;
	}

	/* external cable loopback requires no extra steps */
	if (loopback == LOOPBACK_CABLE)
		return 0;

	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
	return -EINVAL;
}

/*
 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
 * used in the Verify Capability link width attribute.
 */
static u16 opa_to_vc_link_widths(u16 opa_widths)
{
	int i;
	u16 result = 0;

	static const struct link_bits {
		u16 from;
		u16 to;
	} opa_link_xlate[] = {
		{ OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
		{ OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
		{ OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
		{ OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
	};

	for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
		if (opa_widths & opa_link_xlate[i].from)
			result |= opa_link_xlate[i].to;
	}
	return result;
}

/*
 * Set link attributes before moving to polling.
 */
static int set_local_link_attributes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	int ret;

	/* reset our fabric serdes to clear any lingering problems */
	fabric_serdes_reset(dd);

	/* set the local tx rate - need to read-modify-write */
	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			       &rx_polarity_inversion, &ppd->local_tx_rate);
	if (ret)
		goto set_local_link_attributes_fail;

	if (dd->dc8051_ver < dc8051_ver(0, 20)) {
		/* set the tx rate to the fastest enabled */
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate = 1;
		else
			ppd->local_tx_rate = 0;
	} else {
		/* set the tx rate to all enabled */
		ppd->local_tx_rate = 0;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate |= 2;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
			ppd->local_tx_rate |= 1;
	}

	enable_lane_tx = 0xF; /* enable all four lanes */
	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
				rx_polarity_inversion, ppd->local_tx_rate);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/*
	 * DC supports continuous updates.
	 */
	ret = write_vc_local_phy(dd,
				 0 /* no power management */,
				 1 /* continuous updates */);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* z=1 in the next call: AU of 0 is not supported by the hardware */
	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
				    ppd->port_crc_mode_enabled);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	ret = write_vc_local_link_width(dd, 0, 0,
					opa_to_vc_link_widths(
						ppd->link_width_enabled));
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* let peer know who we are */
	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
	if (ret == HCMD_SUCCESS)
		return 0;

set_local_link_attributes_fail:
	dd_dev_err(dd,
		   "Failed to set local link attributes, return 0x%x\n",
		   ret);
	return ret;
}
9150
9151/*
Easwar Hariharan623bba22016-04-12 11:25:57 -07009152 * Call this to start the link.
9153 * Do not do anything if the link is disabled.
9154 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009155 */
9156int start_link(struct hfi1_pportdata *ppd)
9157{
9158 if (!ppd->link_enabled) {
9159 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009160 "%s: stopping link start because link is disabled\n",
9161 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009162 return 0;
9163 }
9164 if (!ppd->driver_link_ready) {
9165 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009166 "%s: stopping link start because driver is not ready\n",
9167 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009168 return 0;
9169 }
9170
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07009171 /*
9172 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9173 * pkey table can be configured properly if the HFI unit is connected
 9174	 * to a switch port with MgmtAllowed=NO
9175 */
9176 clear_full_mgmt_pkey(ppd);
9177
Easwar Hariharan623bba22016-04-12 11:25:57 -07009178 return set_link_state(ppd, HLS_DN_POLL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009179}
9180
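/*
 * Wait for the QSFP module to assert INT_N (active low) after a reset,
 * indicating that its t_init interval (SFF-8679) has elapsed; poll the
 * QSFP IN CSR for up to 2 seconds before giving up.
 */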
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009181static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9182{
9183 struct hfi1_devdata *dd = ppd->dd;
9184 u64 mask;
9185 unsigned long timeout;
9186
9187 /*
9188 * Check for QSFP interrupt for t_init (SFF 8679)
9189 */
9190 timeout = jiffies + msecs_to_jiffies(2000);
9191 while (1) {
9192 mask = read_csr(dd, dd->hfi1_id ?
9193 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9194 if (!(mask & QSFP_HFI0_INT_N)) {
9195 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9196 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9197 break;
9198 }
9199 if (time_after(jiffies, timeout)) {
9200 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9201 __func__);
9202 break;
9203 }
9204 udelay(2);
9205 }
9206}
9207
9208static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9209{
9210 struct hfi1_devdata *dd = ppd->dd;
9211 u64 mask;
9212
9213 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9214 if (enable)
9215 mask |= (u64)QSFP_HFI0_INT_N;
9216 else
9217 mask &= ~(u64)QSFP_HFI0_INT_N;
9218 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9219}
9220
9221void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009222{
9223 struct hfi1_devdata *dd = ppd->dd;
9224 u64 mask, qsfp_mask;
9225
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009226 /* Disable INT_N from triggering QSFP interrupts */
9227 set_qsfp_int_n(ppd, 0);
9228
9229 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009230 mask = (u64)QSFP_HFI0_RESET_N;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009231
9232 qsfp_mask = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009233 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009234 qsfp_mask &= ~mask;
9235 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009236 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009237
9238 udelay(10);
9239
9240 qsfp_mask |= mask;
9241 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009242 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009243
9244 wait_for_qsfp_init(ppd);
9245
9246 /*
9247 * Allow INT_N to trigger the QSFP interrupt to watch
9248 * for alarms and warnings
9249 */
9250 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009251}
9252
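/*
 * Decode the interrupt/flag bytes read from the module: temperature
 * flags in byte 0, supply voltage in byte 1, RX power in bytes 3-4,
 * TX bias in bytes 5-6, and TX power in bytes 7-8 (byte 2 and bytes
 * 13-15 are vendor specific; bytes 9-12 are reserved).
 */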
9253static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9254 u8 *qsfp_interrupt_status)
9255{
9256 struct hfi1_devdata *dd = ppd->dd;
9257
9258 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009259 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
 9260		dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
9261 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009262
9263 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009264 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9265 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9266 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009267
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009268 /*
9269 * The remaining alarms/warnings don't matter if the link is down.
9270 */
9271 if (ppd->host_link_state & HLS_DOWN)
9272 return 0;
9273
Mike Marciniszyn77241052015-07-30 15:17:43 -04009274 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009275 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9276 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9277 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009278
9279 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009280 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9281 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9282 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009283
9284 /* Byte 2 is vendor specific */
9285
9286 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009287 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9288 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9289 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009290
9291 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009292 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9293 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9294 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009295
9296 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009297 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9298 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9299 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009300
9301 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009302 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9303 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9304 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009305
9306 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009307 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9308 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9309 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009310
9311 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009312 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9313 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9314 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009315
9316 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009317 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9318 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9319 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009320
9321 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009322 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9323 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9324 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009325
9326 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009327 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9328 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9329 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009330
9331 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009332 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9333 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9334 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009335
9336 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009337 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9338 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9339 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009340
9341 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009342 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9343 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9344 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009345
9346 /* Bytes 9-10 and 11-12 are reserved */
9347 /* Bytes 13-15 are vendor specific */
9348
9349 return 0;
9350}
9351
Easwar Hariharan623bba22016-04-12 11:25:57 -07009352/* This routine will only be scheduled if the QSFP module-present signal is asserted */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009353void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009354{
9355 struct qsfp_data *qd;
9356 struct hfi1_pportdata *ppd;
9357 struct hfi1_devdata *dd;
9358
9359 qd = container_of(work, struct qsfp_data, qsfp_work);
9360 ppd = qd->ppd;
9361 dd = ppd->dd;
9362
9363 /* Sanity check */
9364 if (!qsfp_mod_present(ppd))
9365 return;
9366
9367 /*
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009368 * Turn DC back on after cable has been re-inserted. Up until
9369 * now, the DC has been in reset to save power.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009370 */
9371 dc_start(dd);
9372
9373 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009374 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009375
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009376 wait_for_qsfp_init(ppd);
9377
9378 /*
9379 * Allow INT_N to trigger the QSFP interrupt to watch
9380 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009381 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009382 set_qsfp_int_n(ppd, 1);
9383
9384 tune_serdes(ppd);
9385
9386 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009387 }
9388
9389 if (qd->check_interrupt_flags) {
9390 u8 qsfp_interrupt_status[16] = {0,};
9391
Dean Luick765a6fa2016-03-05 08:50:06 -08009392 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9393 &qsfp_interrupt_status[0], 16) != 16) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009394 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009395 "%s: Failed to read status of QSFP module\n",
9396 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009397 } else {
9398 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009399
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009400 handle_qsfp_error_conditions(
9401 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009402 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9403 ppd->qsfp_info.check_interrupt_flags = 0;
9404 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08009405 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009406 }
9407 }
9408}
9409
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009410static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009411{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009412 struct hfi1_pportdata *ppd = dd->pport;
9413 u64 qsfp_mask, cce_int_mask;
9414 const int qsfp1_int_smask = QSFP1_INT % 64;
9415 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009416
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009417 /*
 9418	 * Disable QSFP1 interrupts for HFI1, and QSFP2 interrupts for HFI0.
 9419	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR, so just
 9420	 * one of QSFP1_INT/QSFP2_INT can be used to find the index of the
 9421	 * appropriate CSR in the CCEIntMask CSR array.
9422 */
9423 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9424 (8 * (QSFP1_INT / 64)));
9425 if (dd->hfi1_id) {
9426 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9427 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9428 cce_int_mask);
9429 } else {
9430 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9431 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9432 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009433 }
9434
Mike Marciniszyn77241052015-07-30 15:17:43 -04009435 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9436 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009437 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9438 qsfp_mask);
9439 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9440 qsfp_mask);
9441
9442 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009443
9444 /* Handle active low nature of INT_N and MODPRST_N pins */
9445 if (qsfp_mod_present(ppd))
9446 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9447 write_csr(dd,
9448 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9449 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009450}
9451
Dean Luickbbdeb332015-12-01 15:38:15 -05009452/*
9453 * Do a one-time initialize of the LCB block.
9454 */
9455static void init_lcb(struct hfi1_devdata *dd)
9456{
Dean Luicka59329d2016-02-03 14:32:31 -08009457 /* simulator does not correctly handle LCB cclk loopback, skip */
9458 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9459 return;
9460
Dean Luickbbdeb332015-12-01 15:38:15 -05009461 /* the DC has been reset earlier in the driver load */
9462
9463 /* set LCB for cclk loopback on the port */
9464 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9465 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9466 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9467 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9468 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9469 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9470 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9471}
9472
Mike Marciniszyn77241052015-07-30 15:17:43 -04009473int bringup_serdes(struct hfi1_pportdata *ppd)
9474{
9475 struct hfi1_devdata *dd = ppd->dd;
9476 u64 guid;
9477 int ret;
9478
9479 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9480 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9481
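	/*
	 * If no port GUID was provisioned, derive one from the device
	 * base GUID; ports are numbered starting at 1.
	 */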
9482 guid = ppd->guid;
9483 if (!guid) {
9484 if (dd->base_guid)
9485 guid = dd->base_guid + ppd->port - 1;
9486 ppd->guid = guid;
9487 }
9488
Mike Marciniszyn77241052015-07-30 15:17:43 -04009489 /* Set linkinit_reason on power up per OPA spec */
9490 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9491
Dean Luickbbdeb332015-12-01 15:38:15 -05009492 /* one-time init of the LCB */
9493 init_lcb(dd);
9494
Mike Marciniszyn77241052015-07-30 15:17:43 -04009495 if (loopback) {
9496 ret = init_loopback(dd);
9497 if (ret < 0)
9498 return ret;
9499 }
9500
Easwar Hariharan9775a992016-05-12 10:22:39 -07009501 get_port_type(ppd);
9502 if (ppd->port_type == PORT_TYPE_QSFP) {
9503 set_qsfp_int_n(ppd, 0);
9504 wait_for_qsfp_init(ppd);
9505 set_qsfp_int_n(ppd, 1);
9506 }
9507
9508 /*
9509 * Tune the SerDes to a ballpark setting for
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009510	 * optimal signal and bit error rate.
 9511	 * This needs to be done before starting the link.
9512 */
9513 tune_serdes(ppd);
9514
Mike Marciniszyn77241052015-07-30 15:17:43 -04009515 return start_link(ppd);
9516}
9517
9518void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9519{
9520 struct hfi1_devdata *dd = ppd->dd;
9521
9522 /*
 9523	 * Shut down the link and keep it down. First clear the flag
 9524	 * indicating the driver wants the link up (driver_link_ready).
9525 * Then make sure the link is not automatically restarted
9526 * (link_enabled). Cancel any pending restart. And finally
9527 * go offline.
9528 */
9529 ppd->driver_link_ready = 0;
9530 ppd->link_enabled = 0;
9531
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009532 ppd->offline_disabled_reason =
9533 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009534 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009535 OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009536 set_link_state(ppd, HLS_DN_OFFLINE);
9537
9538 /* disable the port */
9539 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9540}
9541
9542static inline int init_cpu_counters(struct hfi1_devdata *dd)
9543{
9544 struct hfi1_pportdata *ppd;
9545 int i;
9546
9547 ppd = (struct hfi1_pportdata *)(dd + 1);
9548 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009549 ppd->ibport_data.rvp.rc_acks = NULL;
9550 ppd->ibport_data.rvp.rc_qacks = NULL;
9551 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9552 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9553 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9554 if (!ppd->ibport_data.rvp.rc_acks ||
9555 !ppd->ibport_data.rvp.rc_delayed_comp ||
9556 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009557 return -ENOMEM;
9558 }
9559
9560 return 0;
9561}
9562
9563static const char * const pt_names[] = {
9564 "expected",
9565 "eager",
9566 "invalid"
9567};
9568
9569static const char *pt_name(u32 type)
9570{
9571 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9572}
9573
9574/*
9575 * index is the index into the receive array
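 * type is one of PT_EXPECTED, PT_EAGER, or PT_INVALID (see pt_names),
 * pa is the buffer's physical address, and order encodes the buffer size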
9576 */
9577void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9578 u32 type, unsigned long pa, u16 order)
9579{
9580 u64 reg;
9581 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9582 (dd->kregbase + RCV_ARRAY));
9583
9584 if (!(dd->flags & HFI1_PRESENT))
9585 goto done;
9586
9587 if (type == PT_INVALID) {
9588 pa = 0;
9589 } else if (type > PT_INVALID) {
9590 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009591 "unexpected receive array type %u for index %u, not handled\n",
9592 type, index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009593 goto done;
9594 }
9595
9596 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9597 pt_name(type), index, pa, (unsigned long)order);
9598
9599#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
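	/*
	 * Pack the write-enable bit, the buffer size encoding, and the
	 * 4KB-aligned physical address into a single 64-bit RcvArray entry.
	 */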
9600 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9601 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9602 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9603 << RCV_ARRAY_RT_ADDR_SHIFT;
9604 writeq(reg, base + (index * 8));
9605
9606 if (type == PT_EAGER)
9607 /*
9608 * Eager entries are written one-by-one so we have to push them
9609 * after we write the entry.
9610 */
9611 flush_wc();
9612done:
9613 return;
9614}
9615
9616void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9617{
9618 struct hfi1_devdata *dd = rcd->dd;
9619 u32 i;
9620
9621 /* this could be optimized */
9622 for (i = rcd->eager_base; i < rcd->eager_base +
9623 rcd->egrbufs.alloced; i++)
9624 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9625
9626 for (i = rcd->expected_base;
9627 i < rcd->expected_base + rcd->expected_count; i++)
9628 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9629}
9630
9631int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9632 struct hfi1_ctxt_info *kinfo)
9633{
9634 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9635 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9636 return 0;
9637}
9638
9639struct hfi1_message_header *hfi1_get_msgheader(
9640 struct hfi1_devdata *dd, __le32 *rhf_addr)
9641{
9642 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9643
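	/* back up from the RHF to the start of the entry, then step to the header */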
9644 return (struct hfi1_message_header *)
9645 (rhf_addr - dd->rhf_offset + offset);
9646}
9647
9648static const char * const ib_cfg_name_strings[] = {
9649 "HFI1_IB_CFG_LIDLMC",
9650 "HFI1_IB_CFG_LWID_DG_ENB",
9651 "HFI1_IB_CFG_LWID_ENB",
9652 "HFI1_IB_CFG_LWID",
9653 "HFI1_IB_CFG_SPD_ENB",
9654 "HFI1_IB_CFG_SPD",
9655 "HFI1_IB_CFG_RXPOL_ENB",
9656 "HFI1_IB_CFG_LREV_ENB",
9657 "HFI1_IB_CFG_LINKLATENCY",
9658 "HFI1_IB_CFG_HRTBT",
9659 "HFI1_IB_CFG_OP_VLS",
9660 "HFI1_IB_CFG_VL_HIGH_CAP",
9661 "HFI1_IB_CFG_VL_LOW_CAP",
9662 "HFI1_IB_CFG_OVERRUN_THRESH",
9663 "HFI1_IB_CFG_PHYERR_THRESH",
9664 "HFI1_IB_CFG_LINKDEFAULT",
9665 "HFI1_IB_CFG_PKEYS",
9666 "HFI1_IB_CFG_MTU",
9667 "HFI1_IB_CFG_LSTATE",
9668 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9669 "HFI1_IB_CFG_PMA_TICKS",
9670 "HFI1_IB_CFG_PORT"
9671};
9672
9673static const char *ib_cfg_name(int which)
9674{
9675 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9676 return "invalid";
9677 return ib_cfg_name_strings[which];
9678}
9679
9680int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9681{
9682 struct hfi1_devdata *dd = ppd->dd;
9683 int val = 0;
9684
9685 switch (which) {
9686 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9687 val = ppd->link_width_enabled;
9688 break;
9689 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9690 val = ppd->link_width_active;
9691 break;
9692 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9693 val = ppd->link_speed_enabled;
9694 break;
9695 case HFI1_IB_CFG_SPD: /* current Link speed */
9696 val = ppd->link_speed_active;
9697 break;
9698
9699 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9700 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9701 case HFI1_IB_CFG_LINKLATENCY:
9702 goto unimplemented;
9703
9704 case HFI1_IB_CFG_OP_VLS:
9705 val = ppd->vls_operational;
9706 break;
9707 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9708 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9709 break;
9710 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9711 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9712 break;
9713 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9714 val = ppd->overrun_threshold;
9715 break;
9716 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9717 val = ppd->phy_error_threshold;
9718 break;
9719 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9720 val = dd->link_default;
9721 break;
9722
9723 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9724 case HFI1_IB_CFG_PMA_TICKS:
9725 default:
9726unimplemented:
9727 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9728 dd_dev_info(
9729 dd,
9730 "%s: which %s: not implemented\n",
9731 __func__,
9732 ib_cfg_name(which));
9733 break;
9734 }
9735
9736 return val;
9737}
9738
9739/*
9740 * The largest MAD packet size.
9741 */
9742#define MAX_MAD_PACKET 2048
9743
9744/*
9745 * Return the maximum header bytes that can go on the _wire_
 9746	 * for this device. This count includes the ICRC, which is
 9747	 * not part of the packet held in memory but is appended
 9748	 * by the HW.
9749 * This is dependent on the device's receive header entry size.
9750 * HFI allows this to be set per-receive context, but the
9751 * driver presently enforces a global value.
9752 */
9753u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9754{
9755 /*
9756 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9757 * the Receive Header Entry Size minus the PBC (or RHF) size
9758 * plus one DW for the ICRC appended by HW.
9759 *
9760 * dd->rcd[0].rcvhdrqentsize is in DW.
 9761	 * We use rcd[0] as all contexts will have the same value. Also,
9762 * the first kernel context would have been allocated by now so
9763 * we are guaranteed a valid value.
9764 */
9765 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9766}
9767
9768/*
9769 * Set Send Length
9770 * @ppd - per port data
9771 *
9772 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9773 * registers compare against LRH.PktLen, so use the max bytes included
9774 * in the LRH.
9775 *
9776 * This routine changes all VL values except VL15, which it maintains at
9777 * the same value.
9778 */
9779static void set_send_length(struct hfi1_pportdata *ppd)
9780{
9781 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009782 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9783 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009784 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9785 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9786 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
Jubin Johnb4ba6632016-06-09 07:51:08 -07009787 int i, j;
Jianxin Xiong44306f12016-04-12 11:30:28 -07009788 u32 thres;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009789
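	/*
	 * Pack the per-VL length limits (in DWs, hence the >> 2): VLs 0-3
	 * go into SEND_LEN_CHECK0 and VLs 4-7, plus VL15 set above, into
	 * SEND_LEN_CHECK1.
	 */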
9790 for (i = 0; i < ppd->vls_supported; i++) {
9791 if (dd->vld[i].mtu > maxvlmtu)
9792 maxvlmtu = dd->vld[i].mtu;
9793 if (i <= 3)
9794 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9795 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9796 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9797 else
9798 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9799 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9800 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9801 }
9802 write_csr(dd, SEND_LEN_CHECK0, len1);
9803 write_csr(dd, SEND_LEN_CHECK1, len2);
9804 /* adjust kernel credit return thresholds based on new MTUs */
9805 /* all kernel receive contexts have the same hdrqentsize */
9806 for (i = 0; i < ppd->vls_supported; i++) {
Jianxin Xiong44306f12016-04-12 11:30:28 -07009807 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9808 sc_mtu_to_threshold(dd->vld[i].sc,
9809 dd->vld[i].mtu,
Jubin John17fb4f22016-02-14 20:21:52 -08009810 dd->rcd[0]->rcvhdrqentsize));
Jubin Johnb4ba6632016-06-09 07:51:08 -07009811 for (j = 0; j < INIT_SC_PER_VL; j++)
9812 sc_set_cr_threshold(
9813 pio_select_send_context_vl(dd, j, i),
9814 thres);
Jianxin Xiong44306f12016-04-12 11:30:28 -07009815 }
9816 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9817 sc_mtu_to_threshold(dd->vld[15].sc,
9818 dd->vld[15].mtu,
9819 dd->rcd[0]->rcvhdrqentsize));
9820 sc_set_cr_threshold(dd->vld[15].sc, thres);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009821
9822 /* Adjust maximum MTU for the port in DC */
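	/* e.g. a 4096-byte MTU encodes as ilog2(4096 >> 8) + 1 = 5 */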
9823 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9824 (ilog2(maxvlmtu >> 8) + 1);
9825 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9826 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9827 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9828 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9829 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9830}
9831
9832static void set_lidlmc(struct hfi1_pportdata *ppd)
9833{
9834 int i;
9835 u64 sreg = 0;
9836 struct hfi1_devdata *dd = ppd->dd;
9837 u32 mask = ~((1U << ppd->lmc) - 1);
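	/* e.g. an LMC of 2 gives ~0x3: the low two LID bits are ignored */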
9838 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9839
9840 if (dd->hfi1_snoop.mode_flag)
9841 dd_dev_info(dd, "Set lid/lmc while snooping");
9842
9843 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9844 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9845 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
Jubin John8638b772016-02-14 20:19:24 -08009846 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
Mike Marciniszyn77241052015-07-30 15:17:43 -04009847 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9848 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9849 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9850
9851 /*
9852 * Iterate over all the send contexts and set their SLID check
9853 */
9854 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9855 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9856 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9857 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9858
9859 for (i = 0; i < dd->chip_send_contexts; i++) {
9860 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9861 i, (u32)sreg);
9862 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9863 }
9864
9865 /* Now we have to do the same thing for the sdma engines */
9866 sdma_update_lmc(dd, mask, ppd->lid);
9867}
9868
9869static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9870{
9871 unsigned long timeout;
9872 u32 curr_state;
9873
9874 timeout = jiffies + msecs_to_jiffies(msecs);
9875 while (1) {
9876 curr_state = read_physical_state(dd);
9877 if (curr_state == state)
9878 break;
9879 if (time_after(jiffies, timeout)) {
9880 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009881 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9882 state, curr_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009883 return -ETIMEDOUT;
9884 }
9885 usleep_range(1950, 2050); /* sleep 2ms-ish */
9886 }
9887
9888 return 0;
9889}
9890
9891/*
9892 * Helper for set_link_state(). Do not call except from that routine.
9893 * Expects ppd->hls_mutex to be held.
9894 *
9895 * @rem_reason value to be sent to the neighbor
9896 *
9897 * LinkDownReasons only set if transition succeeds.
9898 */
9899static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9900{
9901 struct hfi1_devdata *dd = ppd->dd;
9902 u32 pstate, previous_state;
9903 u32 last_local_state;
9904 u32 last_remote_state;
9905 int ret;
9906 int do_transition;
9907 int do_wait;
9908
9909 previous_state = ppd->host_link_state;
9910 ppd->host_link_state = HLS_GOING_OFFLINE;
9911 pstate = read_physical_state(dd);
9912 if (pstate == PLS_OFFLINE) {
9913 do_transition = 0; /* in right state */
9914 do_wait = 0; /* ...no need to wait */
9915 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9916 do_transition = 0; /* in an offline transient state */
9917 do_wait = 1; /* ...wait for it to settle */
9918 } else {
9919 do_transition = 1; /* need to move to offline */
9920 do_wait = 1; /* ...will need to wait */
9921 }
9922
9923 if (do_transition) {
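		/* byte 1 of the requested state carries rem_reason for the neighbor */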
9924 ret = set_physical_link_state(dd,
Harish Chegondibf640092016-03-05 08:49:29 -08009925 (rem_reason << 8) | PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009926
9927 if (ret != HCMD_SUCCESS) {
9928 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009929 "Failed to transition to Offline link state, return %d\n",
9930 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009931 return -EINVAL;
9932 }
Bryan Morgana9c05e32016-02-03 14:30:49 -08009933 if (ppd->offline_disabled_reason ==
9934 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -04009935 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -08009936 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009937 }
9938
9939 if (do_wait) {
9940 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -04009941 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009942 if (ret < 0)
9943 return ret;
9944 }
9945
9946 /* make sure the logical state is also down */
9947 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9948
9949 /*
9950 * Now in charge of LCB - must be after the physical state is
9951 * offline.quiet and before host_link_state is changed.
9952 */
9953 set_host_lcb_access(dd);
9954 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9955 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9956
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009957 if (ppd->port_type == PORT_TYPE_QSFP &&
9958 ppd->qsfp_info.limiting_active &&
9959 qsfp_mod_present(ppd)) {
Dean Luick765a6fa2016-03-05 08:50:06 -08009960 int ret;
9961
9962 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
9963 if (ret == 0) {
9964 set_qsfp_tx(ppd, 0);
9965 release_chip_resource(dd, qsfp_resource(dd));
9966 } else {
9967 /* not fatal, but should warn */
9968 dd_dev_err(dd,
9969 "Unable to acquire lock to turn off QSFP TX\n");
9970 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009971 }
9972
Mike Marciniszyn77241052015-07-30 15:17:43 -04009973 /*
9974 * The LNI has a mandatory wait time after the physical state
9975 * moves to Offline.Quiet. The wait time may be different
9976 * depending on how the link went down. The 8051 firmware
9977 * will observe the needed wait time and only move to ready
9978 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -05009979 * is 6s, so wait that long and then at least 0.5s more for
9980 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009981 */
Dean Luick05087f3b2015-12-01 15:38:16 -05009982 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009983 if (ret) {
9984 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009985 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009986 /* state is really offline, so make it so */
9987 ppd->host_link_state = HLS_DN_OFFLINE;
9988 return ret;
9989 }
9990
9991 /*
9992 * The state is now offline and the 8051 is ready to accept host
9993 * requests.
9994 * - change our state
9995 * - notify others if we were previously in a linkup state
9996 */
9997 ppd->host_link_state = HLS_DN_OFFLINE;
9998 if (previous_state & HLS_UP) {
9999 /* went down while link was up */
10000 handle_linkup_change(dd, 0);
10001 } else if (previous_state
10002 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10003 /* went down while attempting link up */
10004 /* byte 1 of last_*_state is the failure reason */
10005 read_last_local_state(dd, &last_local_state);
10006 read_last_remote_state(dd, &last_remote_state);
10007 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010008 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
10009 last_local_state, last_remote_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010010 }
10011
10012 /* the active link width (downgrade) is 0 on link down */
10013 ppd->link_width_active = 0;
10014 ppd->link_width_downgrade_tx_active = 0;
10015 ppd->link_width_downgrade_rx_active = 0;
10016 ppd->current_egress_rate = 0;
10017 return 0;
10018}
10019
10020/* return the link state name */
10021static const char *link_state_name(u32 state)
10022{
10023 const char *name;
10024 int n = ilog2(state);
10025 static const char * const names[] = {
10026 [__HLS_UP_INIT_BP] = "INIT",
10027 [__HLS_UP_ARMED_BP] = "ARMED",
10028 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10029 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10030 [__HLS_DN_POLL_BP] = "POLL",
10031 [__HLS_DN_DISABLE_BP] = "DISABLE",
10032 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10033 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10034 [__HLS_GOING_UP_BP] = "GOING_UP",
10035 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10036 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10037 };
10038
10039 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10040 return name ? name : "unknown";
10041}
10042
10043/* return the link state reason name */
10044static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10045{
10046 if (state == HLS_UP_INIT) {
10047 switch (ppd->linkinit_reason) {
10048 case OPA_LINKINIT_REASON_LINKUP:
10049 return "(LINKUP)";
10050 case OPA_LINKINIT_REASON_FLAPPING:
10051 return "(FLAPPING)";
10052 case OPA_LINKINIT_OUTSIDE_POLICY:
10053 return "(OUTSIDE_POLICY)";
10054 case OPA_LINKINIT_QUARANTINED:
10055 return "(QUARANTINED)";
10056 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10057 return "(INSUFIC_CAPABILITY)";
10058 default:
10059 break;
10060 }
10061 }
10062 return "";
10063}
10064
10065/*
10066 * driver_physical_state - convert the driver's notion of a port's
10067 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10068 * Return -1 (converted to a u32) to indicate error.
10069 */
10070u32 driver_physical_state(struct hfi1_pportdata *ppd)
10071{
10072 switch (ppd->host_link_state) {
10073 case HLS_UP_INIT:
10074 case HLS_UP_ARMED:
10075 case HLS_UP_ACTIVE:
10076 return IB_PORTPHYSSTATE_LINKUP;
10077 case HLS_DN_POLL:
10078 return IB_PORTPHYSSTATE_POLLING;
10079 case HLS_DN_DISABLE:
10080 return IB_PORTPHYSSTATE_DISABLED;
10081 case HLS_DN_OFFLINE:
10082 return OPA_PORTPHYSSTATE_OFFLINE;
10083 case HLS_VERIFY_CAP:
10084 return IB_PORTPHYSSTATE_POLLING;
10085 case HLS_GOING_UP:
10086 return IB_PORTPHYSSTATE_POLLING;
10087 case HLS_GOING_OFFLINE:
10088 return OPA_PORTPHYSSTATE_OFFLINE;
10089 case HLS_LINK_COOLDOWN:
10090 return OPA_PORTPHYSSTATE_OFFLINE;
10091 case HLS_DN_DOWNDEF:
10092 default:
10093 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10094 ppd->host_link_state);
10095 return -1;
10096 }
10097}
10098
10099/*
10100 * driver_logical_state - convert the driver's notion of a port's
10101 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10102 * (converted to a u32) to indicate error.
10103 */
10104u32 driver_logical_state(struct hfi1_pportdata *ppd)
10105{
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -070010106 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010107 return IB_PORT_DOWN;
10108
10109 switch (ppd->host_link_state & HLS_UP) {
10110 case HLS_UP_INIT:
10111 return IB_PORT_INIT;
10112 case HLS_UP_ARMED:
10113 return IB_PORT_ARMED;
10114 case HLS_UP_ACTIVE:
10115 return IB_PORT_ACTIVE;
10116 default:
10117 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10118 ppd->host_link_state);
10119 return -1;
10120 }
10121}
10122
10123void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10124 u8 neigh_reason, u8 rem_reason)
10125{
10126 if (ppd->local_link_down_reason.latest == 0 &&
10127 ppd->neigh_link_down_reason.latest == 0) {
10128 ppd->local_link_down_reason.latest = lcl_reason;
10129 ppd->neigh_link_down_reason.latest = neigh_reason;
10130 ppd->remote_link_down_reason = rem_reason;
10131 }
10132}
10133
10134/*
10135 * Change the physical and/or logical link state.
10136 *
10137 * Do not call this routine while inside an interrupt. It contains
10138 * calls to routines that can take multiple seconds to finish.
10139 *
10140 * Returns 0 on success, -errno on failure.
10141 */
10142int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10143{
10144 struct hfi1_devdata *dd = ppd->dd;
10145 struct ib_event event = {.device = NULL};
10146 int ret1, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010147 int orig_new_state, poll_bounce;
10148
10149 mutex_lock(&ppd->hls_lock);
10150
10151 orig_new_state = state;
10152 if (state == HLS_DN_DOWNDEF)
10153 state = dd->link_default;
10154
10155 /* interpret poll -> poll as a link bounce */
Jubin Johnd0d236e2016-02-14 20:20:15 -080010156 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10157 state == HLS_DN_POLL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010158
10159 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -080010160 link_state_name(ppd->host_link_state),
10161 link_state_name(orig_new_state),
10162 poll_bounce ? "(bounce) " : "",
10163 link_state_reason_name(ppd, state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010164
Mike Marciniszyn77241052015-07-30 15:17:43 -040010165 /*
10166 * If we're going to a (HLS_*) link state that implies the logical
10167 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10168 * reset is_sm_config_started to 0.
10169 */
10170 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10171 ppd->is_sm_config_started = 0;
10172
10173 /*
10174 * Do nothing if the states match. Let a poll to poll link bounce
10175 * go through.
10176 */
10177 if (ppd->host_link_state == state && !poll_bounce)
10178 goto done;
10179
10180 switch (state) {
10181 case HLS_UP_INIT:
Jubin Johnd0d236e2016-02-14 20:20:15 -080010182 if (ppd->host_link_state == HLS_DN_POLL &&
10183 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010184 /*
10185 * Quick link up jumps from polling to here.
10186 *
10187 * Whether in normal or loopback mode, the
10188 * simulator jumps from polling to link up.
10189 * Accept that here.
10190 */
Jubin John17fb4f22016-02-14 20:21:52 -080010191 /* OK */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010192 } else if (ppd->host_link_state != HLS_GOING_UP) {
10193 goto unexpected;
10194 }
10195
10196 ppd->host_link_state = HLS_UP_INIT;
10197 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10198 if (ret) {
10199 /* logical state didn't change, stay at going_up */
10200 ppd->host_link_state = HLS_GOING_UP;
10201 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010202 "%s: logical state did not change to INIT\n",
10203 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010204 } else {
10205 /* clear old transient LINKINIT_REASON code */
10206 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10207 ppd->linkinit_reason =
10208 OPA_LINKINIT_REASON_LINKUP;
10209
10210 /* enable the port */
10211 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10212
10213 handle_linkup_change(dd, 1);
10214 }
10215 break;
10216 case HLS_UP_ARMED:
10217 if (ppd->host_link_state != HLS_UP_INIT)
10218 goto unexpected;
10219
10220 ppd->host_link_state = HLS_UP_ARMED;
10221 set_logical_state(dd, LSTATE_ARMED);
10222 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10223 if (ret) {
10224 /* logical state didn't change, stay at init */
10225 ppd->host_link_state = HLS_UP_INIT;
10226 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010227 "%s: logical state did not change to ARMED\n",
10228 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010229 }
10230 /*
10231 * The simulator does not currently implement SMA messages,
10232 * so neighbor_normal is not set. Set it here when we first
10233 * move to Armed.
10234 */
10235 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10236 ppd->neighbor_normal = 1;
10237 break;
10238 case HLS_UP_ACTIVE:
10239 if (ppd->host_link_state != HLS_UP_ARMED)
10240 goto unexpected;
10241
10242 ppd->host_link_state = HLS_UP_ACTIVE;
10243 set_logical_state(dd, LSTATE_ACTIVE);
10244 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10245 if (ret) {
10246 /* logical state didn't change, stay at armed */
10247 ppd->host_link_state = HLS_UP_ARMED;
10248 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010249 "%s: logical state did not change to ACTIVE\n",
10250 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010251 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010252 /* tell all engines to go running */
10253 sdma_all_running(dd);
10254
 10255			/* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010256 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010257 event.element.port_num = ppd->port;
10258 event.event = IB_EVENT_PORT_ACTIVE;
10259 }
10260 break;
10261 case HLS_DN_POLL:
10262 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10263 ppd->host_link_state == HLS_DN_OFFLINE) &&
10264 dd->dc_shutdown)
10265 dc_start(dd);
10266 /* Hand LED control to the DC */
10267 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10268
10269 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10270 u8 tmp = ppd->link_enabled;
10271
10272 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10273 if (ret) {
10274 ppd->link_enabled = tmp;
10275 break;
10276 }
10277 ppd->remote_link_down_reason = 0;
10278
10279 if (ppd->driver_link_ready)
10280 ppd->link_enabled = 1;
10281 }
10282
Jim Snowfb9036d2016-01-11 18:32:21 -050010283 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010284 ret = set_local_link_attributes(ppd);
10285 if (ret)
10286 break;
10287
10288 ppd->port_error_action = 0;
10289 ppd->host_link_state = HLS_DN_POLL;
10290
10291 if (quick_linkup) {
10292 /* quick linkup does not go into polling */
10293 ret = do_quick_linkup(dd);
10294 } else {
10295 ret1 = set_physical_link_state(dd, PLS_POLLING);
10296 if (ret1 != HCMD_SUCCESS) {
10297 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010298 "Failed to transition to Polling link state, return 0x%x\n",
10299 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010300 ret = -EINVAL;
10301 }
10302 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010303 ppd->offline_disabled_reason =
10304 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010305 /*
10306 * If an error occurred above, go back to offline. The
10307 * caller may reschedule another attempt.
10308 */
10309 if (ret)
10310 goto_offline(ppd, 0);
10311 break;
10312 case HLS_DN_DISABLE:
10313 /* link is disabled */
10314 ppd->link_enabled = 0;
10315
10316 /* allow any state to transition to disabled */
10317
10318 /* must transition to offline first */
10319 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10320 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10321 if (ret)
10322 break;
10323 ppd->remote_link_down_reason = 0;
10324 }
10325
10326 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10327 if (ret1 != HCMD_SUCCESS) {
10328 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010329 "Failed to transition to Disabled link state, return 0x%x\n",
10330 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010331 ret = -EINVAL;
10332 break;
10333 }
10334 ppd->host_link_state = HLS_DN_DISABLE;
10335 dc_shutdown(dd);
10336 break;
10337 case HLS_DN_OFFLINE:
10338 if (ppd->host_link_state == HLS_DN_DISABLE)
10339 dc_start(dd);
10340
10341 /* allow any state to transition to offline */
10342 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10343 if (!ret)
10344 ppd->remote_link_down_reason = 0;
10345 break;
10346 case HLS_VERIFY_CAP:
10347 if (ppd->host_link_state != HLS_DN_POLL)
10348 goto unexpected;
10349 ppd->host_link_state = HLS_VERIFY_CAP;
10350 break;
10351 case HLS_GOING_UP:
10352 if (ppd->host_link_state != HLS_VERIFY_CAP)
10353 goto unexpected;
10354
10355 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10356 if (ret1 != HCMD_SUCCESS) {
10357 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010358 "Failed to transition to link up state, return 0x%x\n",
10359 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010360 ret = -EINVAL;
10361 break;
10362 }
10363 ppd->host_link_state = HLS_GOING_UP;
10364 break;
10365
10366 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10367 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10368 default:
10369 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010370 __func__, state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010371 ret = -EINVAL;
10372 break;
10373 }
10374
Mike Marciniszyn77241052015-07-30 15:17:43 -040010375 goto done;
10376
10377unexpected:
10378 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010379 __func__, link_state_name(ppd->host_link_state),
10380 link_state_name(state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010381 ret = -EINVAL;
10382
10383done:
10384 mutex_unlock(&ppd->hls_lock);
10385
10386 if (event.device)
10387 ib_dispatch_event(&event);
10388
10389 return ret;
10390}
10391
10392int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10393{
10394 u64 reg;
10395 int ret = 0;
10396
10397 switch (which) {
10398 case HFI1_IB_CFG_LIDLMC:
10399 set_lidlmc(ppd);
10400 break;
10401 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10402 /*
10403 * The VL Arbitrator high limit is sent in units of 4k
10404 * bytes, while HFI stores it in units of 64 bytes.
10405 */
Jubin John8638b772016-02-14 20:19:24 -080010406 val *= 4096 / 64;
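		/* i.e. scale by 64 to convert 4KB units to 64-byte units */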
Mike Marciniszyn77241052015-07-30 15:17:43 -040010407 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10408 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10409 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10410 break;
10411 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10412 /* HFI only supports POLL as the default link down state */
10413 if (val != HLS_DN_POLL)
10414 ret = -EINVAL;
10415 break;
10416 case HFI1_IB_CFG_OP_VLS:
10417 if (ppd->vls_operational != val) {
10418 ppd->vls_operational = val;
10419 if (!ppd->port)
10420 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010421 }
10422 break;
10423 /*
10424 * For link width, link width downgrade, and speed enable, always AND
10425 * the setting with what is actually supported. This has two benefits.
10426 * First, enabled can't have unsupported values, no matter what the
10427 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10428 * "fill in with your supported value" have all the bits in the
10429 * field set, so simply ANDing with supported has the desired result.
10430 */
10431 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10432 ppd->link_width_enabled = val & ppd->link_width_supported;
10433 break;
10434 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10435 ppd->link_width_downgrade_enabled =
10436 val & ppd->link_width_downgrade_supported;
10437 break;
10438 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10439 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10440 break;
10441 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10442 /*
 10443		 * HFI does not follow IB specs; save this value
 10444		 * so we can report it if asked.
10445 */
10446 ppd->overrun_threshold = val;
10447 break;
10448 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10449 /*
 10450		 * HFI does not follow IB specs; save this value
 10451		 * so we can report it if asked.
10452 */
10453 ppd->phy_error_threshold = val;
10454 break;
10455
10456 case HFI1_IB_CFG_MTU:
10457 set_send_length(ppd);
10458 break;
10459
10460 case HFI1_IB_CFG_PKEYS:
10461 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10462 set_partition_keys(ppd);
10463 break;
10464
10465 default:
10466 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10467 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010468 "%s: which %s, val 0x%x: not implemented\n",
10469 __func__, ib_cfg_name(which), val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010470 break;
10471 }
10472 return ret;
10473}
10474
10475/* begin functions related to vl arbitration table caching */
10476static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10477{
10478 int i;
10479
10480 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10481 VL_ARB_LOW_PRIO_TABLE_SIZE);
10482 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10483 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10484
10485 /*
10486 * Note that we always return values directly from the
10487 * 'vl_arb_cache' (and do no CSR reads) in response to a
10488 * 'Get(VLArbTable)'. This is obviously correct after a
10489 * 'Set(VLArbTable)', since the cache will then be up to
10490 * date. But it's also correct prior to any 'Set(VLArbTable)'
10491 * since then both the cache, and the relevant h/w registers
10492 * will be zeroed.
10493 */
10494
10495 for (i = 0; i < MAX_PRIO_TABLE; i++)
10496 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10497}
10498
10499/*
10500 * vl_arb_lock_cache
10501 *
10502 * All other vl_arb_* functions should be called only after locking
10503 * the cache.
10504 */
10505static inline struct vl_arb_cache *
10506vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10507{
10508 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10509 return NULL;
10510 spin_lock(&ppd->vl_arb_cache[idx].lock);
10511 return &ppd->vl_arb_cache[idx];
10512}
10513
10514static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10515{
10516 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10517}
10518
10519static void vl_arb_get_cache(struct vl_arb_cache *cache,
10520 struct ib_vl_weight_elem *vl)
10521{
10522 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10523}
10524
10525static void vl_arb_set_cache(struct vl_arb_cache *cache,
10526 struct ib_vl_weight_elem *vl)
10527{
10528 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10529}
10530
10531static int vl_arb_match_cache(struct vl_arb_cache *cache,
10532 struct ib_vl_weight_elem *vl)
10533{
10534 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10535}
Jubin Johnf4d507c2016-02-14 20:20:25 -080010536
Mike Marciniszyn77241052015-07-30 15:17:43 -040010537/* end functions related to vl arbitration table caching */
10538
10539static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10540 u32 size, struct ib_vl_weight_elem *vl)
10541{
10542 struct hfi1_devdata *dd = ppd->dd;
10543 u64 reg;
10544 unsigned int i, is_up = 0;
10545 int drain, ret = 0;
10546
10547 mutex_lock(&ppd->hls_lock);
10548
10549 if (ppd->host_link_state & HLS_UP)
10550 is_up = 1;
10551
10552 drain = !is_ax(dd) && is_up;
10553
10554 if (drain)
10555 /*
10556 * Before adjusting VL arbitration weights, empty per-VL
10557 * FIFOs, otherwise a packet whose VL weight is being
10558 * set to 0 could get stuck in a FIFO with no chance to
10559 * egress.
10560 */
10561 ret = stop_drain_data_vls(dd);
10562
10563 if (ret) {
10564 dd_dev_err(
10565 dd,
10566 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10567 __func__);
10568 goto err;
10569 }
10570
10571 for (i = 0; i < size; i++, vl++) {
10572 /*
10573 * NOTE: The low priority shift and mask are used here, but
10574 * they are the same for both the low and high registers.
10575 */
10576 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10577 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10578 | (((u64)vl->weight
10579 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10580 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10581 write_csr(dd, target + (i * 8), reg);
10582 }
10583 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10584
10585 if (drain)
10586 open_fill_data_vls(dd); /* reopen all VLs */
10587
10588err:
10589 mutex_unlock(&ppd->hls_lock);
10590
10591 return ret;
10592}
10593
10594/*
10595 * Read one credit merge VL register.
10596 */
10597static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10598 struct vl_limit *vll)
10599{
10600 u64 reg = read_csr(dd, csr);
10601
10602 vll->dedicated = cpu_to_be16(
10603 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10604 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10605 vll->shared = cpu_to_be16(
10606 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10607 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10608}
10609
10610/*
10611 * Read the current credit merge limits.
10612 */
10613static int get_buffer_control(struct hfi1_devdata *dd,
10614 struct buffer_control *bc, u16 *overall_limit)
10615{
10616 u64 reg;
10617 int i;
10618
10619 /* not all entries are filled in */
10620 memset(bc, 0, sizeof(*bc));
10621
10622 /* OPA and HFI have a 1-1 mapping */
10623 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080010624 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010625
10626 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10627 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10628
10629 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10630 bc->overall_shared_limit = cpu_to_be16(
10631 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10632 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10633 if (overall_limit)
10634 *overall_limit = (reg
10635 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10636 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10637 return sizeof(struct buffer_control);
10638}
10639
10640static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10641{
10642 u64 reg;
10643 int i;
10644
10645 /* each register contains 16 SC->VLnt mappings, 4 bits each */
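	/* the low nibble of each byte is the even entry, the high nibble the odd */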
10646 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10647 for (i = 0; i < sizeof(u64); i++) {
10648 u8 byte = *(((u8 *)&reg) + i);
10649
10650 dp->vlnt[2 * i] = byte & 0xf;
10651 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10652 }
10653
10654 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10655 for (i = 0; i < sizeof(u64); i++) {
10656 u8 byte = *(((u8 *)&reg) + i);
10657
10658 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10659 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10660 }
10661 return sizeof(struct sc2vlnt);
10662}
10663
10664static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10665 struct ib_vl_weight_elem *vl)
10666{
10667 unsigned int i;
10668
10669 for (i = 0; i < nelems; i++, vl++) {
10670 vl->vl = 0xf;
10671 vl->weight = 0;
10672 }
10673}
10674
10675static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10676{
10677 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
Jubin John17fb4f22016-02-14 20:21:52 -080010678 DC_SC_VL_VAL(15_0,
10679 0, dp->vlnt[0] & 0xf,
10680 1, dp->vlnt[1] & 0xf,
10681 2, dp->vlnt[2] & 0xf,
10682 3, dp->vlnt[3] & 0xf,
10683 4, dp->vlnt[4] & 0xf,
10684 5, dp->vlnt[5] & 0xf,
10685 6, dp->vlnt[6] & 0xf,
10686 7, dp->vlnt[7] & 0xf,
10687 8, dp->vlnt[8] & 0xf,
10688 9, dp->vlnt[9] & 0xf,
10689 10, dp->vlnt[10] & 0xf,
10690 11, dp->vlnt[11] & 0xf,
10691 12, dp->vlnt[12] & 0xf,
10692 13, dp->vlnt[13] & 0xf,
10693 14, dp->vlnt[14] & 0xf,
10694 15, dp->vlnt[15] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010695 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
Jubin John17fb4f22016-02-14 20:21:52 -080010696 DC_SC_VL_VAL(31_16,
10697 16, dp->vlnt[16] & 0xf,
10698 17, dp->vlnt[17] & 0xf,
10699 18, dp->vlnt[18] & 0xf,
10700 19, dp->vlnt[19] & 0xf,
10701 20, dp->vlnt[20] & 0xf,
10702 21, dp->vlnt[21] & 0xf,
10703 22, dp->vlnt[22] & 0xf,
10704 23, dp->vlnt[23] & 0xf,
10705 24, dp->vlnt[24] & 0xf,
10706 25, dp->vlnt[25] & 0xf,
10707 26, dp->vlnt[26] & 0xf,
10708 27, dp->vlnt[27] & 0xf,
10709 28, dp->vlnt[28] & 0xf,
10710 29, dp->vlnt[29] & 0xf,
10711 30, dp->vlnt[30] & 0xf,
10712 31, dp->vlnt[31] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010713}
10714
10715static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10716 u16 limit)
10717{
10718 if (limit != 0)
10719 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010720 what, (int)limit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010721}
10722
10723/* change only the shared limit portion of SendCmGlobalCredit */
10724static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10725{
10726 u64 reg;
10727
10728 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10729 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10730 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10731 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10732}
10733
10734/* change only the total credit limit portion of SendCmGlobalCredit */
10735static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10736{
10737 u64 reg;
10738
10739 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10740 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10741 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10742 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10743}
10744
10745/* set the given per-VL shared limit */
10746static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10747{
10748 u64 reg;
10749 u32 addr;
10750
10751 if (vl < TXE_NUM_DATA_VL)
10752 addr = SEND_CM_CREDIT_VL + (8 * vl);
10753 else
10754 addr = SEND_CM_CREDIT_VL15;
10755
10756 reg = read_csr(dd, addr);
10757 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10758 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10759 write_csr(dd, addr, reg);
10760}
10761
10762/* set the given per-VL dedicated limit */
10763static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10764{
10765 u64 reg;
10766 u32 addr;
10767
10768 if (vl < TXE_NUM_DATA_VL)
10769 addr = SEND_CM_CREDIT_VL + (8 * vl);
10770 else
10771 addr = SEND_CM_CREDIT_VL15;
10772
10773 reg = read_csr(dd, addr);
10774 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10775 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10776 write_csr(dd, addr, reg);
10777}
10778
10779/* spin until the given per-VL status mask bits clear */
10780static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10781 const char *which)
10782{
10783 unsigned long timeout;
10784 u64 reg;
10785
10786 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10787 while (1) {
10788 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10789
10790 if (reg == 0)
10791 return; /* success */
10792 if (time_after(jiffies, timeout))
10793 break; /* timed out */
10794 udelay(1);
10795 }
10796
10797 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010798 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10799 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010800 /*
10801 * If this occurs, it is likely there was a credit loss on the link.
10802 * The only recovery from that is a link bounce.
10803 */
10804 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010805 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010806}
10807
10808/*
10809 * The number of credits on the VLs may be changed while everything
10810 * is "live", but the following algorithm must be followed due to
10811 * how the hardware is actually implemented. In particular,
10812 * Return_Credit_Status[] is the only correct status check.
10813 *
10814 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10815 * set Global_Shared_Credit_Limit = 0
10816 * use_all_vl = 1
10817 * mask0 = all VLs that are changing either dedicated or shared limits
10818 * set Shared_Limit[mask0] = 0
10819 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10820 * if (changing any dedicated limit)
10821 * mask1 = all VLs that are lowering dedicated limits
10822 * lower Dedicated_Limit[mask1]
10823 * spin until Return_Credit_Status[mask1] == 0
10824 * raise Dedicated_Limits
10825 * raise Shared_Limits
10826 * raise Global_Shared_Credit_Limit
10827 *
10828 * lower = if the new limit is lower, set the limit to the new value
10829 * raise = if the new limit is higher than the current value (may be changed
10830 * earlier in the algorithm), set the new limit to the new value
10831 *	earlier in the algorithm), set the new limit to the new value
 */
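/*
 * Illustrative trace of the algorithm above, not from the original
 * source; the credit values are hypothetical. Suppose only VL1 lowers
 * its dedicated limit (40 -> 20) and no shared or global limits shrink:
 *
 *	mask0 = VL1 (its dedicated limit is changing)
 *	Shared_Limit[VL1] = 0
 *	spin until Return_Credit_Status[VL1] == 0
 *	mask1 = VL1 (its dedicated limit is lowering)
 *	Dedicated_Limit[VL1] = 20
 *	spin until Return_Credit_Status[VL1] == 0
 *	no dedicated limits are rising, nothing to raise
 *	re-raise Shared_Limit[VL1] to its (unchanged) value
 *
 * Since neither the global shared limit nor any shared limit was
 * reduced, use_all_vl stays 0 and only VL1's status bit is polled.
 */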
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010832int set_buffer_control(struct hfi1_pportdata *ppd,
10833 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040010834{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010835 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010836 u64 changing_mask, ld_mask, stat_mask;
10837 int change_count;
10838 int i, use_all_mask;
10839 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010840 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010841 /*
10842	 * A0: the variable any_shared_limit_changing below and its use in the
10843	 * algorithm above exist only for A0; remove them along with A0 support.
10844 */
10845 int any_shared_limit_changing;
10846 struct buffer_control cur_bc;
10847 u8 changing[OPA_MAX_VLS];
10848 u8 lowering_dedicated[OPA_MAX_VLS];
10849 u16 cur_total;
10850 u32 new_total = 0;
10851 const u64 all_mask =
10852 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10853 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10854 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10855 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10856 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10857 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10858 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10859 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10860 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10861
10862#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10863#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10864
Mike Marciniszyn77241052015-07-30 15:17:43 -040010865 /* find the new total credits, do sanity check on unused VLs */
10866 for (i = 0; i < OPA_MAX_VLS; i++) {
10867 if (valid_vl(i)) {
10868 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10869 continue;
10870 }
10871 nonzero_msg(dd, i, "dedicated",
Jubin John17fb4f22016-02-14 20:21:52 -080010872 be16_to_cpu(new_bc->vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010873 nonzero_msg(dd, i, "shared",
Jubin John17fb4f22016-02-14 20:21:52 -080010874 be16_to_cpu(new_bc->vl[i].shared));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010875 new_bc->vl[i].dedicated = 0;
10876 new_bc->vl[i].shared = 0;
10877 }
10878 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050010879
Mike Marciniszyn77241052015-07-30 15:17:43 -040010880 /* fetch the current values */
10881 get_buffer_control(dd, &cur_bc, &cur_total);
10882
10883 /*
10884 * Create the masks we will use.
10885 */
10886 memset(changing, 0, sizeof(changing));
10887 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
Jubin John4d114fd2016-02-14 20:21:43 -080010888 /*
10889 * NOTE: Assumes that the individual VL bits are adjacent and in
10890 * increasing order
10891 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010892 stat_mask =
10893 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10894 changing_mask = 0;
10895 ld_mask = 0;
10896 change_count = 0;
10897 any_shared_limit_changing = 0;
10898 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10899 if (!valid_vl(i))
10900 continue;
10901 this_shared_changing = new_bc->vl[i].shared
10902 != cur_bc.vl[i].shared;
10903 if (this_shared_changing)
10904 any_shared_limit_changing = 1;
Jubin Johnd0d236e2016-02-14 20:20:15 -080010905 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10906 this_shared_changing) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010907 changing[i] = 1;
10908 changing_mask |= stat_mask;
10909 change_count++;
10910 }
10911 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10912 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10913 lowering_dedicated[i] = 1;
10914 ld_mask |= stat_mask;
10915 }
10916 }
10917
10918 /* bracket the credit change with a total adjustment */
10919 if (new_total > cur_total)
10920 set_global_limit(dd, new_total);
10921
10922 /*
10923 * Start the credit change algorithm.
10924 */
10925 use_all_mask = 0;
10926 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010927 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10928 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010929 set_global_shared(dd, 0);
10930 cur_bc.overall_shared_limit = 0;
10931 use_all_mask = 1;
10932 }
10933
10934 for (i = 0; i < NUM_USABLE_VLS; i++) {
10935 if (!valid_vl(i))
10936 continue;
10937
10938 if (changing[i]) {
10939 set_vl_shared(dd, i, 0);
10940 cur_bc.vl[i].shared = 0;
10941 }
10942 }
10943
10944 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
Jubin John17fb4f22016-02-14 20:21:52 -080010945 "shared");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010946
10947 if (change_count > 0) {
10948 for (i = 0; i < NUM_USABLE_VLS; i++) {
10949 if (!valid_vl(i))
10950 continue;
10951
10952 if (lowering_dedicated[i]) {
10953 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080010954 be16_to_cpu(new_bc->
10955 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010956 cur_bc.vl[i].dedicated =
10957 new_bc->vl[i].dedicated;
10958 }
10959 }
10960
10961 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10962
10963 /* now raise all dedicated that are going up */
10964 for (i = 0; i < NUM_USABLE_VLS; i++) {
10965 if (!valid_vl(i))
10966 continue;
10967
10968 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10969 be16_to_cpu(cur_bc.vl[i].dedicated))
10970 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080010971 be16_to_cpu(new_bc->
10972 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010973 }
10974 }
10975
10976 /* next raise all shared that are going up */
10977 for (i = 0; i < NUM_USABLE_VLS; i++) {
10978 if (!valid_vl(i))
10979 continue;
10980
10981 if (be16_to_cpu(new_bc->vl[i].shared) >
10982 be16_to_cpu(cur_bc.vl[i].shared))
10983 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10984 }
10985
10986 /* finally raise the global shared */
10987 if (be16_to_cpu(new_bc->overall_shared_limit) >
Jubin John17fb4f22016-02-14 20:21:52 -080010988 be16_to_cpu(cur_bc.overall_shared_limit))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010989 set_global_shared(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010990 be16_to_cpu(new_bc->overall_shared_limit));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010991
10992 /* bracket the credit change with a total adjustment */
10993 if (new_total < cur_total)
10994 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010995
10996 /*
10997	 * Determine the actual number of operational VLs using the number of
10998 * dedicated and shared credits for each VL.
10999 */
11000 if (change_count > 0) {
11001 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11002 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11003 be16_to_cpu(new_bc->vl[i].shared) > 0)
11004 vl_count++;
11005 ppd->actual_vls_operational = vl_count;
11006 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11007 ppd->actual_vls_operational :
11008 ppd->vls_operational,
11009 NULL);
11010 if (ret == 0)
11011 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11012 ppd->actual_vls_operational :
11013 ppd->vls_operational, NULL);
11014 if (ret)
11015 return ret;
11016 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011017 return 0;
11018}
11019
11020/*
11021 * Read the given fabric manager table. Return the size of the
11022 * table (in bytes) on success, and a negative error code on
11023 * failure.
11024 */
11025int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11027{
11028 int size;
11029 struct vl_arb_cache *vlc;
11030
11031 switch (which) {
11032 case FM_TBL_VL_HIGH_ARB:
11033 size = 256;
11034 /*
11035 * OPA specifies 128 elements (of 2 bytes each), though
11036 * HFI supports only 16 elements in h/w.
11037 */
11038 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11039 vl_arb_get_cache(vlc, t);
11040 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11041 break;
11042 case FM_TBL_VL_LOW_ARB:
11043 size = 256;
11044 /*
11045 * OPA specifies 128 elements (of 2 bytes each), though
11046 * HFI supports only 16 elements in h/w.
11047 */
11048 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11049 vl_arb_get_cache(vlc, t);
11050 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11051 break;
11052 case FM_TBL_BUFFER_CONTROL:
11053 size = get_buffer_control(ppd->dd, t, NULL);
11054 break;
11055 case FM_TBL_SC2VLNT:
11056 size = get_sc2vlnt(ppd->dd, t);
11057 break;
11058 case FM_TBL_VL_PREEMPT_ELEMS:
11059 size = 256;
11060 /* OPA specifies 128 elements, of 2 bytes each */
11061 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11062 break;
11063 case FM_TBL_VL_PREEMPT_MATRIX:
11064 size = 256;
11065 /*
11066 * OPA specifies that this is the same size as the VL
11067 * arbitration tables (i.e., 256 bytes).
11068 */
11069 break;
11070 default:
11071 return -EINVAL;
11072 }
11073 return size;
11074}
11075
11076/*
11077 * Write the given fabric manager table.
11078 */
11079int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11080{
11081 int ret = 0;
11082 struct vl_arb_cache *vlc;
11083
11084 switch (which) {
11085 case FM_TBL_VL_HIGH_ARB:
11086 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11087 if (vl_arb_match_cache(vlc, t)) {
11088 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11089 break;
11090 }
11091 vl_arb_set_cache(vlc, t);
11092 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11093 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11094 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11095 break;
11096 case FM_TBL_VL_LOW_ARB:
11097 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11098 if (vl_arb_match_cache(vlc, t)) {
11099 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11100 break;
11101 }
11102 vl_arb_set_cache(vlc, t);
11103 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11104 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11105 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11106 break;
11107 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011108 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011109 break;
11110 case FM_TBL_SC2VLNT:
11111 set_sc2vlnt(ppd->dd, t);
11112 break;
11113 default:
11114 ret = -EINVAL;
11115 }
11116 return ret;
11117}
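/*
 * Illustrative usage, not from the original source: the FM table
 * get/set pair above is driven with a caller-supplied buffer, e.g.
 * (hypothetical buffer and flow):
 *
 *	u8 buf[256];
 *	int sz, ret;
 *
 *	sz = fm_get_table(ppd, FM_TBL_VL_LOW_ARB, buf);
 *	if (sz > 0)
 *		ret = fm_set_table(ppd, FM_TBL_VL_LOW_ARB, buf);
 *
 * FM_TBL_VL_LOW_ARB transfers 256 bytes (128 two-byte elements) even
 * though only 16 elements are backed by hardware.
 */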
11118
11119/*
11120 * Disable all data VLs.
11121 *
11122 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11123 */
11124static int disable_data_vls(struct hfi1_devdata *dd)
11125{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011126 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011127 return 1;
11128
11129 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11130
11131 return 0;
11132}
11133
11134/*
11135 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11136 * Just re-enables all data VLs (the "fill" part happens
11137 * automatically - the name was chosen for symmetry with
11138 * stop_drain_data_vls()).
11139 *
11140 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11141 */
11142int open_fill_data_vls(struct hfi1_devdata *dd)
11143{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011144 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011145 return 1;
11146
11147 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11148
11149 return 0;
11150}
11151
11152/*
11153 * drain_data_vls() - assumes that disable_data_vls() has been called;
11154 * waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
11155 * engines to drop to 0.
11156 */
11157static void drain_data_vls(struct hfi1_devdata *dd)
11158{
11159 sc_wait(dd);
11160 sdma_wait(dd);
11161 pause_for_credit_return(dd);
11162}
11163
11164/*
11165 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11166 *
11167 * Use open_fill_data_vls() to resume using data VLs. This pair is
11168 * meant to be used like this:
11169 *
11170 * stop_drain_data_vls(dd);
11171 * // do things with per-VL resources
11172 * open_fill_data_vls(dd);
11173 */
11174int stop_drain_data_vls(struct hfi1_devdata *dd)
11175{
11176 int ret;
11177
11178 ret = disable_data_vls(dd);
11179 if (ret == 0)
11180 drain_data_vls(dd);
11181
11182 return ret;
11183}
11184
11185/*
11186 * Convert a nanosecond time to a cclock count. No matter how slow
11187 * the cclock, a non-zero ns will always have a non-zero result.
11188 */
11189u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11190{
11191 u32 cclocks;
11192
11193 if (dd->icode == ICODE_FPGA_EMULATION)
11194 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11195 else /* simulation pretends to be ASIC */
11196 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11197 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11198 cclocks = 1;
11199 return cclocks;
11200}
11201
11202/*
11203 * Convert a cclock count to nanoseconds. No matter how slow
11204 * the cclock, a non-zero cclocks will always have a non-zero result.
11205 */
11206u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11207{
11208 u32 ns;
11209
11210 if (dd->icode == ICODE_FPGA_EMULATION)
11211 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11212 else /* simulation pretends to be ASIC */
11213 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11214 if (cclocks && !ns)
11215 ns = 1;
11216 return ns;
11217}
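/*
 * Illustrative note, not from the original source: a worked round trip
 * through the two conversions above, assuming a hypothetical cclock
 * period of 1250 ps (1.25 ns per cclock):
 *
 *	ns_to_cclock(dd, 5) -> (5 * 1000) / 1250 = 4 cclocks
 *	cclock_to_ns(dd, 4) -> (4 * 1250) / 1000 = 5 ns
 *	ns_to_cclock(dd, 1) -> (1 * 1000) / 1250 = 0, clamped to 1
 *
 * The last case is why the "must be at least 1" clamp exists: integer
 * division would otherwise round a small nonzero input down to 0.
 */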
11218
11219/*
11220 * Dynamically adjust the receive interrupt timeout for a context based on
11221 * incoming packet rate.
11222 *
11223 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11224 */
11225static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11226{
11227 struct hfi1_devdata *dd = rcd->dd;
11228 u32 timeout = rcd->rcvavail_timeout;
11229
11230 /*
11231 * This algorithm doubles or halves the timeout depending on whether
11232	 * the number of packets received in this interrupt was less than or
11233	 * greater than or equal to the interrupt count.
11234	 *
11235	 * The calculations below do not allow a steady state to be achieved.
11236	 * Only at the endpoints is it possible to have an unchanging
11237 * timeout.
11238 */
11239 if (npkts < rcv_intr_count) {
11240 /*
11241 * Not enough packets arrived before the timeout, adjust
11242 * timeout downward.
11243 */
11244 if (timeout < 2) /* already at minimum? */
11245 return;
11246 timeout >>= 1;
11247 } else {
11248 /*
11249 * More than enough packets arrived before the timeout, adjust
11250 * timeout upward.
11251 */
11252 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11253 return;
11254 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11255 }
11256
11257 rcd->rcvavail_timeout = timeout;
Jubin John4d114fd2016-02-14 20:21:43 -080011258 /*
11259 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11260 * been verified to be in range
11261 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011262 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011263 (u64)timeout <<
11264 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011265}
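/*
 * Illustrative note, not from the original source; the numbers are
 * hypothetical: a few steps of the doubling/halving above, assuming
 * rcv_intr_count = 16 and rcv_intr_timeout_csr = 64:
 *
 *	timeout = 8,  npkts = 3  (< 16)  -> timeout = 4
 *	timeout = 4,  npkts = 40 (>= 16) -> timeout = 8
 *	timeout = 64, npkts = 40 (>= 16) -> unchanged (already at max)
 *	timeout = 1,  npkts = 3  (< 16)  -> unchanged (already at min)
 *
 * As the comment above notes, the value ping-pongs unless it is pinned
 * at one of the endpoints.
 */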
11266
11267void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11268 u32 intr_adjust, u32 npkts)
11269{
11270 struct hfi1_devdata *dd = rcd->dd;
11271 u64 reg;
11272 u32 ctxt = rcd->ctxt;
11273
11274 /*
11275 * Need to write timeout register before updating RcvHdrHead to ensure
11276 * that a new value is used when the HW decides to restart counting.
11277 */
11278 if (intr_adjust)
11279 adjust_rcv_timeout(rcd, npkts);
11280 if (updegr) {
11281 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11282 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11283 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11284 }
11285 mmiowb();
11286 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11287 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11288 << RCV_HDR_HEAD_HEAD_SHIFT);
11289 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11290 mmiowb();
11291}
11292
11293u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11294{
11295 u32 head, tail;
11296
11297 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11298 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11299
11300 if (rcd->rcvhdrtail_kvaddr)
11301 tail = get_rcvhdrtail(rcd);
11302 else
11303 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11304
11305 return head == tail;
11306}
11307
11308/*
11309 * Context Control and Receive Array encoding for buffer size:
11310 * 0x0 invalid
11311 * 0x1 4 KB
11312 * 0x2 8 KB
11313 * 0x3 16 KB
11314 * 0x4 32 KB
11315 * 0x5 64 KB
11316 * 0x6 128 KB
11317 * 0x7 256 KB
11318 * 0x8 512 KB (Receive Array only)
11319 * 0x9 1 MB (Receive Array only)
11320 * 0xa 2 MB (Receive Array only)
11321 *
11322 * 0xB-0xF - reserved (Receive Array only)
11323 *
11324 *
11325 * This routine assumes that the value has already been sanity checked.
11326 */
11327static u32 encoded_size(u32 size)
11328{
11329 switch (size) {
Jubin John8638b772016-02-14 20:19:24 -080011330 case 4 * 1024: return 0x1;
11331 case 8 * 1024: return 0x2;
11332 case 16 * 1024: return 0x3;
11333 case 32 * 1024: return 0x4;
11334 case 64 * 1024: return 0x5;
11335 case 128 * 1024: return 0x6;
11336 case 256 * 1024: return 0x7;
11337 case 512 * 1024: return 0x8;
11338 case 1 * 1024 * 1024: return 0x9;
11339 case 2 * 1024 * 1024: return 0xa;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011340 }
11341 return 0x1; /* if invalid, go with the minimum size */
11342}
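/*
 * Illustrative note, not from the original source: for the supported
 * sizes the encoding above is simply log2(size) - 11, e.g.:
 *
 *	encoded_size(8 * 1024)  -> 0x2 (log2(8192) = 13, 13 - 11 = 2)
 *	encoded_size(64 * 1024) -> 0x5 (log2(65536) = 16, 16 - 11 = 5)
 *	encoded_size(3000)      -> 0x1 (unlisted size, minimum returned)
 */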
11343
11344void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11345{
11346 struct hfi1_ctxtdata *rcd;
11347 u64 rcvctrl, reg;
11348 int did_enable = 0;
11349
11350 rcd = dd->rcd[ctxt];
11351 if (!rcd)
11352 return;
11353
11354 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11355
11356 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11357	/* if the context is already enabled, don't do the extra steps */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011358 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11359 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011360 /* reset the tail and hdr addresses, and sequence count */
11361 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11362 rcd->rcvhdrq_phys);
11363 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11364 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11365 rcd->rcvhdrqtailaddr_phys);
11366 rcd->seq_cnt = 1;
11367
11368 /* reset the cached receive header queue head value */
11369 rcd->head = 0;
11370
11371 /*
11372 * Zero the receive header queue so we don't get false
11373 * positives when checking the sequence number. The
11374 * sequence numbers could land exactly on the same spot.
11375	 * E.g. an rcd restart before the receive header queue wrapped.
11376 */
11377 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11378
11379 /* starting timeout */
11380 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11381
11382 /* enable the context */
11383 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11384
11385 /* clean the egr buffer size first */
11386 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11387 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11388 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11389 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11390
11391 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11392 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11393 did_enable = 1;
11394
11395 /* zero RcvEgrIndexHead */
11396 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11397
11398 /* set eager count and base index */
11399 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11400 & RCV_EGR_CTRL_EGR_CNT_MASK)
11401 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11402 (((rcd->eager_base >> RCV_SHIFT)
11403 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11404 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11405 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11406
11407 /*
11408 * Set TID (expected) count and base index.
11409 * rcd->expected_count is set to individual RcvArray entries,
11410 * not pairs, and the CSR takes a pair-count in groups of
11411 * four, so divide by 8.
11412 */
11413 reg = (((rcd->expected_count >> RCV_SHIFT)
11414 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11415 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11416 (((rcd->expected_base >> RCV_SHIFT)
11417 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11418 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11419 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011420 if (ctxt == HFI1_CTRL_CTXT)
11421 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011422 }
11423 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11424 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011425 /*
11426	 * When a receive context is being disabled, turn on tail
11427	 * update with a dummy tail address and then disable the
11428	 * receive context.
11429 */
11430 if (dd->rcvhdrtail_dummy_physaddr) {
11431 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11432 dd->rcvhdrtail_dummy_physaddr);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011433 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011434 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11435 }
11436
Mike Marciniszyn77241052015-07-30 15:17:43 -040011437 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11438 }
11439 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11440 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11441 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11442 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11443 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11444 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011445 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11446 /* See comment on RcvCtxtCtrl.TailUpd above */
11447 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11448 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11449 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011450 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11451 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11452 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11453 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11454 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011455 /*
11456 * In one-packet-per-eager mode, the size comes from
11457 * the RcvArray entry.
11458 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011459 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11460 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11461 }
11462 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11463 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11464 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11465 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11466 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11467 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11468 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11469 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11470 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11471 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11472 rcd->rcvctrl = rcvctrl;
11473 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11474 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11475
11476 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011477 if (did_enable &&
11478 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011479 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11480 if (reg != 0) {
11481 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011482 ctxt, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011483 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11484 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11485 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11486 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11487 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11488 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011489 ctxt, reg, reg == 0 ? "not" : "still");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011490 }
11491 }
11492
11493 if (did_enable) {
11494 /*
11495 * The interrupt timeout and count must be set after
11496 * the context is enabled to take effect.
11497 */
11498 /* set interrupt timeout */
11499 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011500 (u64)rcd->rcvavail_timeout <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040011501 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11502
11503 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11504 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11505 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11506 }
11507
11508 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11509 /*
11510 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011511	 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11512	 * so it doesn't contain an invalid address.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011513 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011514 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11515 dd->rcvhdrtail_dummy_physaddr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011516}
11517
Dean Luick582e05c2016-02-18 11:13:01 -080011518u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011519{
11520 int ret;
11521 u64 val = 0;
11522
11523 if (namep) {
11524 ret = dd->cntrnameslen;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011525 *namep = dd->cntrnames;
11526 } else {
11527 const struct cntr_entry *entry;
11528 int i, j;
11529
11530 ret = (dd->ndevcntrs) * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011531
11532 /* Get the start of the block of counters */
11533 *cntrp = dd->cntrs;
11534
11535 /*
11536 * Now go and fill in each counter in the block.
11537 */
11538 for (i = 0; i < DEV_CNTR_LAST; i++) {
11539 entry = &dev_cntrs[i];
11540 hfi1_cdbg(CNTR, "reading %s", entry->name);
11541 if (entry->flags & CNTR_DISABLED) {
11542 /* Nothing */
11543 hfi1_cdbg(CNTR, "\tDisabled\n");
11544 } else {
11545 if (entry->flags & CNTR_VL) {
11546 hfi1_cdbg(CNTR, "\tPer VL\n");
11547 for (j = 0; j < C_VL_COUNT; j++) {
11548 val = entry->rw_cntr(entry,
11549 dd, j,
11550 CNTR_MODE_R,
11551 0);
11552 hfi1_cdbg(
11553 CNTR,
11554 "\t\tRead 0x%llx for %d\n",
11555 val, j);
11556 dd->cntrs[entry->offset + j] =
11557 val;
11558 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011559 } else if (entry->flags & CNTR_SDMA) {
11560 hfi1_cdbg(CNTR,
11561 "\t Per SDMA Engine\n");
11562 for (j = 0; j < dd->chip_sdma_engines;
11563 j++) {
11564 val =
11565 entry->rw_cntr(entry, dd, j,
11566 CNTR_MODE_R, 0);
11567 hfi1_cdbg(CNTR,
11568 "\t\tRead 0x%llx for %d\n",
11569 val, j);
11570 dd->cntrs[entry->offset + j] =
11571 val;
11572 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011573 } else {
11574 val = entry->rw_cntr(entry, dd,
11575 CNTR_INVALID_VL,
11576 CNTR_MODE_R, 0);
11577 dd->cntrs[entry->offset] = val;
11578 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11579 }
11580 }
11581 }
11582 }
11583 return ret;
11584}
11585
11586/*
11587 * Used by sysfs to create files for hfi stats to read
11588 */
Dean Luick582e05c2016-02-18 11:13:01 -080011589u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011590{
11591 int ret;
11592 u64 val = 0;
11593
11594 if (namep) {
Dean Luick582e05c2016-02-18 11:13:01 -080011595 ret = ppd->dd->portcntrnameslen;
11596 *namep = ppd->dd->portcntrnames;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011597 } else {
11598 const struct cntr_entry *entry;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011599 int i, j;
11600
Dean Luick582e05c2016-02-18 11:13:01 -080011601 ret = ppd->dd->nportcntrs * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011602 *cntrp = ppd->cntrs;
11603
11604 for (i = 0; i < PORT_CNTR_LAST; i++) {
11605 entry = &port_cntrs[i];
11606 hfi1_cdbg(CNTR, "reading %s", entry->name);
11607 if (entry->flags & CNTR_DISABLED) {
11608 /* Nothing */
11609 hfi1_cdbg(CNTR, "\tDisabled\n");
11610 continue;
11611 }
11612
11613 if (entry->flags & CNTR_VL) {
11614 hfi1_cdbg(CNTR, "\tPer VL");
11615 for (j = 0; j < C_VL_COUNT; j++) {
11616 val = entry->rw_cntr(entry, ppd, j,
11617 CNTR_MODE_R,
11618 0);
11619 hfi1_cdbg(
11620 CNTR,
11621 "\t\tRead 0x%llx for %d",
11622 val, j);
11623 ppd->cntrs[entry->offset + j] = val;
11624 }
11625 } else {
11626 val = entry->rw_cntr(entry, ppd,
11627 CNTR_INVALID_VL,
11628 CNTR_MODE_R,
11629 0);
11630 ppd->cntrs[entry->offset] = val;
11631 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11632 }
11633 }
11634 }
11635 return ret;
11636}
11637
11638static void free_cntrs(struct hfi1_devdata *dd)
11639{
11640 struct hfi1_pportdata *ppd;
11641 int i;
11642
11643 if (dd->synth_stats_timer.data)
11644 del_timer_sync(&dd->synth_stats_timer);
11645 dd->synth_stats_timer.data = 0;
11646 ppd = (struct hfi1_pportdata *)(dd + 1);
11647 for (i = 0; i < dd->num_pports; i++, ppd++) {
11648 kfree(ppd->cntrs);
11649 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011650 free_percpu(ppd->ibport_data.rvp.rc_acks);
11651 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11652 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011653 ppd->cntrs = NULL;
11654 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011655 ppd->ibport_data.rvp.rc_acks = NULL;
11656 ppd->ibport_data.rvp.rc_qacks = NULL;
11657 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011658 }
11659 kfree(dd->portcntrnames);
11660 dd->portcntrnames = NULL;
11661 kfree(dd->cntrs);
11662 dd->cntrs = NULL;
11663 kfree(dd->scntrs);
11664 dd->scntrs = NULL;
11665 kfree(dd->cntrnames);
11666 dd->cntrnames = NULL;
11667}
11668
11669#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11670#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11671
11672static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11673 u64 *psval, void *context, int vl)
11674{
11675 u64 val;
11676 u64 sval = *psval;
11677
11678 if (entry->flags & CNTR_DISABLED) {
11679 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11680 return 0;
11681 }
11682
11683 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11684
11685 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11686
11687	/* If it's a synthetic counter, there is more work we need to do */
11688 if (entry->flags & CNTR_SYNTH) {
11689 if (sval == CNTR_MAX) {
11690 /* No need to read already saturated */
11691 return CNTR_MAX;
11692 }
11693
11694 if (entry->flags & CNTR_32BIT) {
11695 /* 32bit counters can wrap multiple times */
11696 u64 upper = sval >> 32;
11697 u64 lower = (sval << 32) >> 32;
11698
11699 if (lower > val) { /* hw wrapped */
11700 if (upper == CNTR_32BIT_MAX)
11701 val = CNTR_MAX;
11702 else
11703 upper++;
11704 }
11705
11706 if (val != CNTR_MAX)
11707 val = (upper << 32) | val;
11708
11709 } else {
11710 /* If we rolled we are saturated */
11711 if ((val < sval) || (val > CNTR_MAX))
11712 val = CNTR_MAX;
11713 }
11714 }
11715
11716 *psval = val;
11717
11718 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11719
11720 return val;
11721}
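/*
 * Illustrative note, not from the original source; the values are
 * hypothetical: how the 32-bit wrap handling above extends a rolled
 * hardware counter. Suppose the saved synthetic value is
 * sval = 0x1FFFFFFF0 (upper = 0x1, lower = 0xFFFFFFF0) and the hardware
 * now reads back val = 0x10:
 *
 *	lower (0xFFFFFFF0) > val (0x10) -> the hw counter wrapped
 *	upper != CNTR_32BIT_MAX         -> upper becomes 0x2
 *	val = (0x2 << 32) | 0x10        -> 0x200000010
 *
 * Had upper already been CNTR_32BIT_MAX, the counter would instead
 * saturate at CNTR_MAX.
 */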
11722
11723static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11724 struct cntr_entry *entry,
11725 u64 *psval, void *context, int vl, u64 data)
11726{
11727 u64 val;
11728
11729 if (entry->flags & CNTR_DISABLED) {
11730 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11731 return 0;
11732 }
11733
11734 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11735
11736 if (entry->flags & CNTR_SYNTH) {
11737 *psval = data;
11738 if (entry->flags & CNTR_32BIT) {
11739 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11740 (data << 32) >> 32);
11741 val = data; /* return the full 64bit value */
11742 } else {
11743 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11744 data);
11745 }
11746 } else {
11747 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11748 }
11749
11750 *psval = val;
11751
11752 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11753
11754 return val;
11755}
11756
11757u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11758{
11759 struct cntr_entry *entry;
11760 u64 *sval;
11761
11762 entry = &dev_cntrs[index];
11763 sval = dd->scntrs + entry->offset;
11764
11765 if (vl != CNTR_INVALID_VL)
11766 sval += vl;
11767
11768 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11769}
11770
11771u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11772{
11773 struct cntr_entry *entry;
11774 u64 *sval;
11775
11776 entry = &dev_cntrs[index];
11777 sval = dd->scntrs + entry->offset;
11778
11779 if (vl != CNTR_INVALID_VL)
11780 sval += vl;
11781
11782 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11783}
11784
11785u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11786{
11787 struct cntr_entry *entry;
11788 u64 *sval;
11789
11790 entry = &port_cntrs[index];
11791 sval = ppd->scntrs + entry->offset;
11792
11793 if (vl != CNTR_INVALID_VL)
11794 sval += vl;
11795
11796 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11797 (index <= C_RCV_HDR_OVF_LAST)) {
11798 /* We do not want to bother for disabled contexts */
11799 return 0;
11800 }
11801
11802 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11803}
11804
11805u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11806{
11807 struct cntr_entry *entry;
11808 u64 *sval;
11809
11810 entry = &port_cntrs[index];
11811 sval = ppd->scntrs + entry->offset;
11812
11813 if (vl != CNTR_INVALID_VL)
11814 sval += vl;
11815
11816 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11817 (index <= C_RCV_HDR_OVF_LAST)) {
11818 /* We do not want to bother for disabled contexts */
11819 return 0;
11820 }
11821
11822 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11823}
11824
11825static void update_synth_timer(unsigned long opaque)
11826{
11827 u64 cur_tx;
11828 u64 cur_rx;
11829 u64 total_flits;
11830 u8 update = 0;
11831 int i, j, vl;
11832 struct hfi1_pportdata *ppd;
11833 struct cntr_entry *entry;
11834
11835 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11836
11837 /*
11838	 * Rather than keep beating on the CSRs, pick a minimal set that we can
11839	 * check to watch for potential rollover. We can do this by looking at
11840	 * the number of flits sent/received. If the total flits exceeds 32 bits,
11841	 * then we have to iterate over all the counters and update.
11842 */
11843 entry = &dev_cntrs[C_DC_RCV_FLITS];
11844 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11845
11846 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11847 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11848
11849 hfi1_cdbg(
11850 CNTR,
11851 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11852 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11853
11854 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11855 /*
11856 * May not be strictly necessary to update but it won't hurt and
11857 * simplifies the logic here.
11858 */
11859 update = 1;
11860 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11861 dd->unit);
11862 } else {
11863 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11864 hfi1_cdbg(CNTR,
11865 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11866 total_flits, (u64)CNTR_32BIT_MAX);
11867 if (total_flits >= CNTR_32BIT_MAX) {
11868 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11869 dd->unit);
11870 update = 1;
11871 }
11872 }
11873
11874 if (update) {
11875 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11876 for (i = 0; i < DEV_CNTR_LAST; i++) {
11877 entry = &dev_cntrs[i];
11878 if (entry->flags & CNTR_VL) {
11879 for (vl = 0; vl < C_VL_COUNT; vl++)
11880 read_dev_cntr(dd, i, vl);
11881 } else {
11882 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11883 }
11884 }
11885 ppd = (struct hfi1_pportdata *)(dd + 1);
11886 for (i = 0; i < dd->num_pports; i++, ppd++) {
11887 for (j = 0; j < PORT_CNTR_LAST; j++) {
11888 entry = &port_cntrs[j];
11889 if (entry->flags & CNTR_VL) {
11890 for (vl = 0; vl < C_VL_COUNT; vl++)
11891 read_port_cntr(ppd, j, vl);
11892 } else {
11893 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11894 }
11895 }
11896 }
11897
11898 /*
11899 * We want the value in the register. The goal is to keep track
11900	 * of the number of "ticks", not the counter value. In other
11901	 * words, if the register rolls, we want to notice it and go ahead
11902	 * and force an update.
11903 */
11904 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11905 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11906 CNTR_MODE_R, 0);
11907
11908 entry = &dev_cntrs[C_DC_RCV_FLITS];
11909 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11910 CNTR_MODE_R, 0);
11911
11912 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11913 dd->unit, dd->last_tx, dd->last_rx);
11914
11915 } else {
11916 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11917 }
11918
Bart Van Assche48a0cc132016-06-03 12:09:56 -070011919 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011920}
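/*
 * Illustrative note, not from the original source; the values are
 * hypothetical: the tripwire check above only forces a full counter
 * sweep when the flit counts threaten a 32-bit rollover, e.g.:
 *
 *	last_tx = 0x10000, cur_tx = 0x20000
 *	last_rx = 0x08000, cur_rx = 0x18000
 *	total_flits = 0x10000 + 0x10000 = 0x20000 < CNTR_32BIT_MAX
 *
 * so no update is needed. Had either counter read back smaller than
 * its saved value, or total_flits reached CNTR_32BIT_MAX, every dev
 * and port counter would have been re-read.
 */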
11921
11922#define C_MAX_NAME 13 /* 12 chars + one for \0 */
11923static int init_cntrs(struct hfi1_devdata *dd)
11924{
Dean Luickc024c552016-01-11 18:30:57 -050011925 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011926 size_t sz;
11927 char *p;
11928 char name[C_MAX_NAME];
11929 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011930 const char *bit_type_32 = ",32";
11931 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011932
11933 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011934 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11935 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011936
11937 /***********************/
11938 /* per device counters */
11939 /***********************/
11940
11941	/* size names and determine how many we have */
11942 dd->ndevcntrs = 0;
11943 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011944
11945 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011946 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11947 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11948 continue;
11949 }
11950
11951 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050011952 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011953 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011954 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080011955 dev_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011956 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011957 /* Add ",32" for 32-bit counters */
11958 if (dev_cntrs[i].flags & CNTR_32BIT)
11959 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011960 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011961 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011962 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011963 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050011964 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011965 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011966 snprintf(name, C_MAX_NAME, "%s%d",
11967 dev_cntrs[i].name, j);
11968 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011969 /* Add ",32" for 32-bit counters */
11970 if (dev_cntrs[i].flags & CNTR_32BIT)
11971 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011972 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011973 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011974 }
11975 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011976 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011977 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011978 /* Add ",32" for 32-bit counters */
11979 if (dev_cntrs[i].flags & CNTR_32BIT)
11980 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050011981 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011982 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011983 }
11984 }
11985
11986 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050011987 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011988 if (!dd->cntrs)
11989 goto bail;
11990
Dean Luickc024c552016-01-11 18:30:57 -050011991 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011992 if (!dd->scntrs)
11993 goto bail;
11994
Mike Marciniszyn77241052015-07-30 15:17:43 -040011995 /* allocate space for the counter names */
11996 dd->cntrnameslen = sz;
11997 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11998 if (!dd->cntrnames)
11999 goto bail;
12000
12001 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050012002 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012003 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12004 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012005 } else if (dev_cntrs[i].flags & CNTR_VL) {
12006 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012007 snprintf(name, C_MAX_NAME, "%s%d",
12008 dev_cntrs[i].name,
12009 vl_from_idx(j));
12010 memcpy(p, name, strlen(name));
12011 p += strlen(name);
12012
12013 /* Counter is 32 bits */
12014 if (dev_cntrs[i].flags & CNTR_32BIT) {
12015 memcpy(p, bit_type_32, bit_type_32_sz);
12016 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012017 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012018
Mike Marciniszyn77241052015-07-30 15:17:43 -040012019 *p++ = '\n';
12020 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012021 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12022 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012023 snprintf(name, C_MAX_NAME, "%s%d",
12024 dev_cntrs[i].name, j);
12025 memcpy(p, name, strlen(name));
12026 p += strlen(name);
12027
12028 /* Counter is 32 bits */
12029 if (dev_cntrs[i].flags & CNTR_32BIT) {
12030 memcpy(p, bit_type_32, bit_type_32_sz);
12031 p += bit_type_32_sz;
12032 }
12033
12034 *p++ = '\n';
12035 }
12036 } else {
12037 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12038 p += strlen(dev_cntrs[i].name);
12039
12040 /* Counter is 32 bits */
12041 if (dev_cntrs[i].flags & CNTR_32BIT) {
12042 memcpy(p, bit_type_32, bit_type_32_sz);
12043 p += bit_type_32_sz;
12044 }
12045
12046 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040012047 }
12048 }
12049
12050 /*********************/
12051 /* per port counters */
12052 /*********************/
12053
12054 /*
12055 * Go through the counters for the overflows and disable the ones we
12056 * don't need. This varies based on platform so we need to do it
12057 * dynamically here.
12058 */
12059 rcv_ctxts = dd->num_rcv_contexts;
12060 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12061 i <= C_RCV_HDR_OVF_LAST; i++) {
12062 port_cntrs[i].flags |= CNTR_DISABLED;
12063 }
12064
12065	/* size port counter names and determine how many we have */
12066 sz = 0;
12067 dd->nportcntrs = 0;
12068 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012069 if (port_cntrs[i].flags & CNTR_DISABLED) {
12070 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12071 continue;
12072 }
12073
12074 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012075 port_cntrs[i].offset = dd->nportcntrs;
12076 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012077 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012078 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012079 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012080 /* Add ",32" for 32-bit counters */
12081 if (port_cntrs[i].flags & CNTR_32BIT)
12082 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012083 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012084 dd->nportcntrs++;
12085 }
12086 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012087 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012088 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012089 /* Add ",32" for 32-bit counters */
12090 if (port_cntrs[i].flags & CNTR_32BIT)
12091 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012092 port_cntrs[i].offset = dd->nportcntrs;
12093 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012094 }
12095 }
12096
12097 /* allocate space for the counter names */
12098 dd->portcntrnameslen = sz;
12099 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12100 if (!dd->portcntrnames)
12101 goto bail;
12102
12103 /* fill in port cntr names */
12104 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12105 if (port_cntrs[i].flags & CNTR_DISABLED)
12106 continue;
12107
12108 if (port_cntrs[i].flags & CNTR_VL) {
12109 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012110 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012111 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012112 memcpy(p, name, strlen(name));
12113 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012114
12115 /* Counter is 32 bits */
12116 if (port_cntrs[i].flags & CNTR_32BIT) {
12117 memcpy(p, bit_type_32, bit_type_32_sz);
12118 p += bit_type_32_sz;
12119 }
12120
Mike Marciniszyn77241052015-07-30 15:17:43 -040012121 *p++ = '\n';
12122 }
12123 } else {
12124 memcpy(p, port_cntrs[i].name,
12125 strlen(port_cntrs[i].name));
12126 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012127
12128 /* Counter is 32 bits */
12129 if (port_cntrs[i].flags & CNTR_32BIT) {
12130 memcpy(p, bit_type_32, bit_type_32_sz);
12131 p += bit_type_32_sz;
12132 }
12133
Mike Marciniszyn77241052015-07-30 15:17:43 -040012134 *p++ = '\n';
12135 }
12136 }
12137
12138 /* allocate per port storage for counter values */
12139 ppd = (struct hfi1_pportdata *)(dd + 1);
12140 for (i = 0; i < dd->num_pports; i++, ppd++) {
12141 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12142 if (!ppd->cntrs)
12143 goto bail;
12144
12145 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12146 if (!ppd->scntrs)
12147 goto bail;
12148 }
12149
12150 /* CPU counters need to be allocated and zeroed */
12151 if (init_cpu_counters(dd))
12152 goto bail;
12153
12154 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12155 return 0;
12156bail:
12157 free_cntrs(dd);
12158 return -ENOMEM;
12159}
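/*
 * Illustrative note, not from the original source; the counter names
 * are hypothetical: the name buffers built above are newline-separated
 * lists, read back through hfi1_read_cntrs()/hfi1_read_portcntrs(),
 * with ",32" appended to 32-bit counters, e.g.:
 *
 *	"TxFlits\n"
 *	"RxDropPkt,32\n"
 *	"TxFlitsVL0\n"
 *	...
 *
 * The sizing pass and the fill pass must account for exactly the same
 * per-name overhead: the '\n' and the optional ",32" suffix.
 */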
12160
Mike Marciniszyn77241052015-07-30 15:17:43 -040012161static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12162{
12163 switch (chip_lstate) {
12164 default:
12165 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012166 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12167 chip_lstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012168 /* fall through */
12169 case LSTATE_DOWN:
12170 return IB_PORT_DOWN;
12171 case LSTATE_INIT:
12172 return IB_PORT_INIT;
12173 case LSTATE_ARMED:
12174 return IB_PORT_ARMED;
12175 case LSTATE_ACTIVE:
12176 return IB_PORT_ACTIVE;
12177 }
12178}
12179
12180u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12181{
12182 /* look at the HFI meta-states only */
12183 switch (chip_pstate & 0xf0) {
12184 default:
12185 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012186 chip_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012187 /* fall through */
12188 case PLS_DISABLED:
12189 return IB_PORTPHYSSTATE_DISABLED;
12190 case PLS_OFFLINE:
12191 return OPA_PORTPHYSSTATE_OFFLINE;
12192 case PLS_POLLING:
12193 return IB_PORTPHYSSTATE_POLLING;
12194 case PLS_CONFIGPHY:
12195 return IB_PORTPHYSSTATE_TRAINING;
12196 case PLS_LINKUP:
12197 return IB_PORTPHYSSTATE_LINKUP;
12198 case PLS_PHYTEST:
12199 return IB_PORTPHYSSTATE_PHY_TEST;
12200 }
12201}
12202
12203/* return the OPA port logical state name */
12204const char *opa_lstate_name(u32 lstate)
12205{
12206 static const char * const port_logical_names[] = {
12207 "PORT_NOP",
12208 "PORT_DOWN",
12209 "PORT_INIT",
12210 "PORT_ARMED",
12211 "PORT_ACTIVE",
12212 "PORT_ACTIVE_DEFER",
12213 };
12214 if (lstate < ARRAY_SIZE(port_logical_names))
12215 return port_logical_names[lstate];
12216 return "unknown";
12217}
12218
12219/* return the OPA port physical state name */
12220const char *opa_pstate_name(u32 pstate)
12221{
12222 static const char * const port_physical_names[] = {
12223 "PHYS_NOP",
12224 "reserved1",
12225 "PHYS_POLL",
12226 "PHYS_DISABLED",
12227 "PHYS_TRAINING",
12228 "PHYS_LINKUP",
12229 "PHYS_LINK_ERR_RECOVER",
12230 "PHYS_PHY_TEST",
12231 "reserved8",
12232 "PHYS_OFFLINE",
12233 "PHYS_GANGED",
12234 "PHYS_TEST",
12235 };
12236 if (pstate < ARRAY_SIZE(port_physical_names))
12237 return port_physical_names[pstate];
12238 return "unknown";
12239}
12240
12241/*
12242 * Read the hardware link state and set the driver's cached value of it.
12243 * Return the (new) current value.
12244 */
12245u32 get_logical_state(struct hfi1_pportdata *ppd)
12246{
12247 u32 new_state;
12248
12249 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12250 if (new_state != ppd->lstate) {
12251 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012252 opa_lstate_name(new_state), new_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012253 ppd->lstate = new_state;
12254 }
12255 /*
12256 * Set port status flags in the page mapped into userspace
12257 * memory. Do it here to ensure a reliable state - this is
12258 * the only function called by all state handling code.
12259 * Always set the flags due to the fact that the cache value
12260 * might have been changed explicitly outside of this
12261 * function.
12262 */
12263 if (ppd->statusp) {
12264 switch (ppd->lstate) {
12265 case IB_PORT_DOWN:
12266 case IB_PORT_INIT:
12267 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12268 HFI1_STATUS_IB_READY);
12269 break;
12270 case IB_PORT_ARMED:
12271 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12272 break;
12273 case IB_PORT_ACTIVE:
12274 *ppd->statusp |= HFI1_STATUS_IB_READY;
12275 break;
12276 }
12277 }
12278 return ppd->lstate;
12279}
12280
12281/**
12282 * wait_logical_linkstate - wait for an IB link state change to occur
12283 * @ppd: port device
12284 * @state: the state to wait for
12285 * @msecs: the number of milliseconds to wait
12286 *
12287 * Wait up to msecs milliseconds for IB link state change to occur.
12288 * For now, take the easy polling route.
12289 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12290 */
12291static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12292 int msecs)
12293{
12294 unsigned long timeout;
12295
12296 timeout = jiffies + msecs_to_jiffies(msecs);
12297 while (1) {
12298 if (get_logical_state(ppd) == state)
12299 return 0;
12300 if (time_after(jiffies, timeout))
12301 break;
12302 msleep(20);
12303 }
12304 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12305
12306 return -ETIMEDOUT;
12307}
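
/*
 * Illustrative use (hypothetical caller, not taken from this file):
 * wait up to one second for the link to reach INIT after a bringup:
 *
 *	if (wait_logical_linkstate(ppd, IB_PORT_INIT, 1000))
 *		return -ETIMEDOUT;
 */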
12308
12309u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12310{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012311 u32 pstate;
12312 u32 ib_pstate;
12313
12314 pstate = read_physical_state(ppd->dd);
12315 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012316 if (ppd->last_pstate != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012317 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012318 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12319 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12320 pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012321 ppd->last_pstate = ib_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012322 }
12323 return ib_pstate;
12324}
12325
12326/*
12327 * Read/modify/write ASIC_QSFP register bits as selected by mask
12328 * data: 0 or 1 in the positions depending on what needs to be written
12329 * dir: 0 for read, 1 for write
12330 * mask: select by setting
12331 * I2CCLK (bit 0)
12332 * I2CDATA (bit 1)
12333 */
12334u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12335 u32 mask)
12336{
12337 u64 qsfp_oe, target_oe;
12338
12339 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12340 if (mask) {
12341 /* We are writing register bits, so lock access */
12342 dir &= mask;
12343 data &= mask;
12344
12345 qsfp_oe = read_csr(dd, target_oe);
12346 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12347 write_csr(dd, target_oe, qsfp_oe);
12348 }
12349 /* We are exclusively reading bits here, but it is unlikely
12350 * we'll get valid data when we set the direction of the pin
12351	 * in the same call, so callers should invoke this function a
12352	 * second time to read valid data.
12353 */
12354 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12355}
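
/*
 * Sketch of a possible call sequence, using the bit layout documented
 * above (values illustrative): first make I2CCLK (bit 0) an output,
 * then call again with mask == 0 to sample the input pins:
 *
 *	hfi1_gpio_mod(dd, 0, 0, 0x1, 0x1);
 *	in_pins = hfi1_gpio_mod(dd, 0, 0, 0, 0);
 */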
12356
12357#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12358(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12359
12360#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12361(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12362
12363int hfi1_init_ctxt(struct send_context *sc)
12364{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012365 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012366 struct hfi1_devdata *dd = sc->dd;
12367 u64 reg;
12368 u8 set = (sc->type == SC_USER ?
12369 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12370 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
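		/*
		 * Note the inverted sense below: when the static rate
		 * control capability is set, the DISALLOW bit is cleared,
		 * which permits PBC static rate control for this context.
		 */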
12371 reg = read_kctxt_csr(dd, sc->hw_context,
12372 SEND_CTXT_CHECK_ENABLE);
12373 if (set)
12374 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12375 else
12376 SET_STATIC_RATE_CONTROL_SMASK(reg);
12377 write_kctxt_csr(dd, sc->hw_context,
12378 SEND_CTXT_CHECK_ENABLE, reg);
12379 }
12380 return 0;
12381}
12382
12383int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12384{
12385 int ret = 0;
12386 u64 reg;
12387
12388 if (dd->icode != ICODE_RTL_SILICON) {
12389 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12390 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12391 __func__);
12392 return -EINVAL;
12393 }
12394 reg = read_csr(dd, ASIC_STS_THERM);
12395 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12396 ASIC_STS_THERM_CURR_TEMP_MASK);
12397 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12398 ASIC_STS_THERM_LO_TEMP_MASK);
12399 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12400 ASIC_STS_THERM_HI_TEMP_MASK);
12401 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12402 ASIC_STS_THERM_CRIT_TEMP_MASK);
12403 /* triggers is a 3-bit value - 1 bit per trigger. */
12404 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12405
12406 return ret;
12407}
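
/*
 * Illustrative use (hypothetical caller): each field of struct
 * hfi1_temp is extracted above as (reg >> SHIFT) & MASK:
 *
 *	struct hfi1_temp temp;
 *
 *	if (!hfi1_tempsense_rd(dd, &temp))
 *		dd_dev_info(dd, "temp %u, hi limit %u\n",
 *			    temp.curr, temp.hi_lim);
 */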
12408
12409/* ========================================================================= */
12410
12411/*
12412 * Enable/disable chip from delivering interrupts.
12413 */
12414void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12415{
12416 int i;
12417
12418 /*
12419 * In HFI, the mask needs to be 1 to allow interrupts.
12420 */
12421 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012422 /* enable all interrupts */
12423 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012424 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012425
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012426 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012427 } else {
12428 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012429 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012430 }
12431}
12432
12433/*
12434 * Clear all interrupt sources on the chip.
12435 */
12436static void clear_all_interrupts(struct hfi1_devdata *dd)
12437{
12438 int i;
12439
12440 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012441 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012442
12443 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12444 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12445 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12446 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12447 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12448 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12449 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12450 for (i = 0; i < dd->chip_send_contexts; i++)
12451 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12452 for (i = 0; i < dd->chip_sdma_engines; i++)
12453 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12454
12455 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12456 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12457 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12458}
12459
12460/* Move to pcie.c? */
12461static void disable_intx(struct pci_dev *pdev)
12462{
12463 pci_intx(pdev, 0);
12464}
12465
12466static void clean_up_interrupts(struct hfi1_devdata *dd)
12467{
12468 int i;
12469
12470 /* remove irqs - must happen before disabling/turning off */
12471 if (dd->num_msix_entries) {
12472 /* MSI-X */
12473 struct hfi1_msix_entry *me = dd->msix_entries;
12474
12475 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012476 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012477 continue;
12478 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012479 free_irq(me->msix.vector, me->arg);
12480 }
12481 } else {
12482 /* INTx */
12483 if (dd->requested_intx_irq) {
12484 free_irq(dd->pcidev->irq, dd);
12485 dd->requested_intx_irq = 0;
12486 }
12487 }
12488
12489 /* turn off interrupts */
12490 if (dd->num_msix_entries) {
12491 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012492 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012493 } else {
12494 /* INTx */
12495 disable_intx(dd->pcidev);
12496 }
12497
12498 /* clean structures */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012499 kfree(dd->msix_entries);
12500 dd->msix_entries = NULL;
12501 dd->num_msix_entries = 0;
12502}
12503
12504/*
12505 * Remap the interrupt source from the general handler to the given MSI-X
12506 * interrupt.
12507 */
12508static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12509{
12510 u64 reg;
12511 int m, n;
12512
12513 /* clear from the handled mask of the general interrupt */
12514 m = isrc / 64;
12515 n = isrc % 64;
12516 dd->gi_mask[m] &= ~((u64)1 << n);
12517
12518 /* direct the chip source to the given MSI-X interrupt */
12519 m = isrc / 8;
12520 n = isrc % 8;
Jubin John8638b772016-02-14 20:19:24 -080012521 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12522 reg &= ~((u64)0xff << (8 * n));
12523 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12524 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012525}
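
/*
 * Index math example: for isrc = 137, the general-handler mask bit is
 * word 137 / 64 = 2, bit 137 % 64 = 9; the chip map entry is byte
 * 137 % 8 = 1 of CCE_INT_MAP CSR 137 / 8 = 17.
 */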
12526
12527static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12528 int engine, int msix_intr)
12529{
12530 /*
12531	 * SDMA engine interrupt sources are grouped by type, rather than
12532 * engine. Per-engine interrupts are as follows:
12533 * SDMA
12534 * SDMAProgress
12535 * SDMAIdle
12536 */
Jubin John8638b772016-02-14 20:19:24 -080012537 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012538 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012539 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012540 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012541 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012542 msix_intr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012543}
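
/*
 * For example, assuming TXE_NUM_SDMA_ENGINES is 16, engine 3 remaps
 * chip sources IS_SDMA_START + 3, + 19, and + 35 (its SDMA,
 * SDMAProgress, and SDMAIdle sources) to the same MSI-X vector.
 */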
12544
Mike Marciniszyn77241052015-07-30 15:17:43 -040012545static int request_intx_irq(struct hfi1_devdata *dd)
12546{
12547 int ret;
12548
Jubin John98050712015-11-16 21:59:27 -050012549 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12550 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012551 ret = request_irq(dd->pcidev->irq, general_interrupt,
Jubin John17fb4f22016-02-14 20:21:52 -080012552 IRQF_SHARED, dd->intx_name, dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012553 if (ret)
12554 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012555 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012556 else
12557 dd->requested_intx_irq = 1;
12558 return ret;
12559}
12560
12561static int request_msix_irqs(struct hfi1_devdata *dd)
12562{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012563 int first_general, last_general;
12564 int first_sdma, last_sdma;
12565 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012566 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012567
12568 /* calculate the ranges we are going to use */
12569 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080012570 last_general = first_general + 1;
12571 first_sdma = last_general;
12572 last_sdma = first_sdma + dd->num_sdma;
12573 first_rx = last_sdma;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012574 last_rx = first_rx + dd->n_krcv_queues;
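	/*
	 * Resulting vector layout, e.g. with num_sdma = 16 and
	 * n_krcv_queues = 8 (illustrative values):
	 *	vector 0       - general interrupt
	 *	vectors 1..16  - one per SDMA engine
	 *	vectors 17..24 - one per kernel receive context
	 */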
12575
12576 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012577 * Sanity check - the code expects all SDMA chip source
12578 * interrupts to be in the same CSR, starting at bit 0. Verify
12579 * that this is true by checking the bit location of the start.
12580 */
12581 BUILD_BUG_ON(IS_SDMA_START % 64);
12582
12583 for (i = 0; i < dd->num_msix_entries; i++) {
12584 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12585 const char *err_info;
12586 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012587 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012588 void *arg;
12589 int idx;
12590 struct hfi1_ctxtdata *rcd = NULL;
12591 struct sdma_engine *sde = NULL;
12592
12593 /* obtain the arguments to request_irq */
12594 if (first_general <= i && i < last_general) {
12595 idx = i - first_general;
12596 handler = general_interrupt;
12597 arg = dd;
12598 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012599 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012600 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012601 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012602 } else if (first_sdma <= i && i < last_sdma) {
12603 idx = i - first_sdma;
12604 sde = &dd->per_sdma[idx];
12605 handler = sdma_interrupt;
12606 arg = sde;
12607 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012608 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012609 err_info = "sdma";
12610 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012611 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012612 } else if (first_rx <= i && i < last_rx) {
12613 idx = i - first_rx;
12614 rcd = dd->rcd[idx];
12615 /* no interrupt if no rcd */
12616 if (!rcd)
12617 continue;
12618 /*
12619 * Set the interrupt register and mask for this
12620 * context's interrupt.
12621 */
Jubin John8638b772016-02-14 20:19:24 -080012622 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012623 rcd->imask = ((u64)1) <<
Jubin John8638b772016-02-14 20:19:24 -080012624 ((IS_RCVAVAIL_START + idx) % 64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012625 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012626 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012627 arg = rcd;
12628 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012629 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012630 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012631 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012632 me->type = IRQ_RCVCTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012633 } else {
12634 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080012635 * ignore it
12636 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012637 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012638 "Unexpected extra MSI-X interrupt %d\n", i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012639 continue;
12640 }
12641 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080012642 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012643 continue;
12644 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080012645 me->name[sizeof(me->name) - 1] = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012646
Dean Luickf4f30031c2015-10-26 10:28:44 -040012647 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
Jubin John17fb4f22016-02-14 20:21:52 -080012648 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012649 if (ret) {
12650 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012651 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12652 err_info, me->msix.vector, idx, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012653 return ret;
12654 }
12655 /*
12656 * assign arg after request_irq call, so it will be
12657 * cleaned up
12658 */
12659 me->arg = arg;
12660
Mitko Haralanov957558c2016-02-03 14:33:40 -080012661 ret = hfi1_get_irq_affinity(dd, me);
12662 if (ret)
12663 dd_dev_err(dd,
12664 "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012665 }
12666
Mike Marciniszyn77241052015-07-30 15:17:43 -040012667 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012668}
12669
12670/*
12671 * Set the general handler to accept all interrupts, remap all
12672 * chip interrupts back to MSI-X 0.
12673 */
12674static void reset_interrupts(struct hfi1_devdata *dd)
12675{
12676 int i;
12677
12678 /* all interrupts handled by the general handler */
12679 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12680 dd->gi_mask[i] = ~(u64)0;
12681
12682 /* all chip interrupts map to MSI-X 0 */
12683 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012684 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012685}
12686
12687static int set_up_interrupts(struct hfi1_devdata *dd)
12688{
12689 struct hfi1_msix_entry *entries;
12690 u32 total, request;
12691 int i, ret;
12692 int single_interrupt = 0; /* we expect to have all the interrupts */
12693
12694 /*
12695 * Interrupt count:
12696 * 1 general, "slow path" interrupt (includes the SDMA engines
12697 * slow source, SDMACleanupDone)
12698 * N interrupts - one per used SDMA engine
12699	 * M interrupts - one per kernel receive context
12700 */
12701 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12702
12703 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12704 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012705 ret = -ENOMEM;
12706 goto fail;
12707 }
12708 /* 1-1 MSI-X entry assignment */
12709 for (i = 0; i < total; i++)
12710 entries[i].msix.entry = i;
12711
12712 /* ask for MSI-X interrupts */
12713 request = total;
12714 request_msix(dd, &request, entries);
12715
12716 if (request == 0) {
12717 /* using INTx */
12718 /* dd->num_msix_entries already zero */
12719 kfree(entries);
12720 single_interrupt = 1;
12721 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12722 } else {
12723 /* using MSI-X */
12724 dd->num_msix_entries = request;
12725 dd->msix_entries = entries;
12726
12727 if (request != total) {
12728 /* using MSI-X, with reduced interrupts */
12729 dd_dev_err(
12730 dd,
12731 "cannot handle reduced interrupt case, want %u, got %u\n",
12732 total, request);
12733 ret = -EINVAL;
12734 goto fail;
12735 }
12736 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12737 }
12738
12739 /* mask all interrupts */
12740 set_intr_state(dd, 0);
12741 /* clear all pending interrupts */
12742 clear_all_interrupts(dd);
12743
12744 /* reset general handler mask, chip MSI-X mappings */
12745 reset_interrupts(dd);
12746
12747 if (single_interrupt)
12748 ret = request_intx_irq(dd);
12749 else
12750 ret = request_msix_irqs(dd);
12751 if (ret)
12752 goto fail;
12753
12754 return 0;
12755
12756fail:
12757 clean_up_interrupts(dd);
12758 return ret;
12759}
12760
12761/*
12762 * Set up context values in dd. Sets:
12763 *
12764 * num_rcv_contexts - number of contexts being used
12765 * n_krcv_queues - number of kernel contexts
12766 * first_user_ctxt - first non-kernel context in array of contexts
12767 * freectxts - number of free user contexts
12768 * num_send_contexts - number of PIO send contexts being used
12769 */
12770static int set_up_context_variables(struct hfi1_devdata *dd)
12771{
12772 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012773 int total_contexts;
12774 int ret;
12775 unsigned ngroups;
Dean Luick8f000f72016-04-12 11:32:06 -070012776 int qos_rmt_count;
12777 int user_rmt_reduced;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012778
12779 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070012780 * Kernel receive contexts:
12781 * - min of 2 or 1 context/numa (excluding control context)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012782 * - Context 0 - control context (VL15/multicast/error)
Dean Luick33a9eb52016-04-12 10:50:22 -070012783 * - Context 1 - first kernel context
12784 * - Context 2 - second kernel context
12785 * ...
Mike Marciniszyn77241052015-07-30 15:17:43 -040012786 */
12787 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012788 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070012789		 * n_krcvqs is the sum of the krcvqs[] module parameter
12790		 * values, the kernel receive contexts. It does not
12791		 * include the control context, so add that.
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012792 */
Dean Luick33a9eb52016-04-12 10:50:22 -070012793 num_kernel_contexts = n_krcvqs + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012794 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012795 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012796 num_kernel_contexts =
12797 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12798 /*
12799 * Every kernel receive context needs an ACK send context.
12800	 * One send context is allocated for each VL{0-7} and VL15.
12801 */
12802 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12803 dd_dev_err(dd,
12804 "Reducing # kernel rcv contexts to: %d, from %d\n",
12805 (int)(dd->chip_send_contexts - num_vls - 1),
12806 (int)num_kernel_contexts);
12807 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12808 }
12809 /*
Jubin John0852d242016-04-12 11:30:08 -070012810 * User contexts:
12811 * - default to 1 user context per real (non-HT) CPU core if
12812 * num_user_contexts is negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012813 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012814 if (num_user_contexts < 0)
Jubin John0852d242016-04-12 11:30:08 -070012815 num_user_contexts =
12816 cpumask_weight(&dd->affinity->real_cpu_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012817
12818 total_contexts = num_kernel_contexts + num_user_contexts;
12819
12820 /*
12821 * Adjust the counts given a global max.
12822 */
12823 if (total_contexts > dd->chip_rcv_contexts) {
12824 dd_dev_err(dd,
12825 "Reducing # user receive contexts to: %d, from %d\n",
12826 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12827 (int)num_user_contexts);
12828 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12829 /* recalculate */
12830 total_contexts = num_kernel_contexts + num_user_contexts;
12831 }
12832
Dean Luick8f000f72016-04-12 11:32:06 -070012833 /* each user context requires an entry in the RMT */
12834 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12835 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12836 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12837 dd_dev_err(dd,
12838 "RMT size is reducing the number of user receive contexts from %d to %d\n",
12839 (int)num_user_contexts,
12840 user_rmt_reduced);
12841 /* recalculate */
12842 num_user_contexts = user_rmt_reduced;
12843 total_contexts = num_kernel_contexts + num_user_contexts;
12844 }
12845
Mike Marciniszyn77241052015-07-30 15:17:43 -040012846 /* the first N are kernel contexts, the rest are user contexts */
12847 dd->num_rcv_contexts = total_contexts;
12848 dd->n_krcv_queues = num_kernel_contexts;
12849 dd->first_user_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080012850 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012851 dd->freectxts = num_user_contexts;
12852 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012853 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12854 (int)dd->chip_rcv_contexts,
12855 (int)dd->num_rcv_contexts,
12856 (int)dd->n_krcv_queues,
12857 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012858
12859 /*
12860 * Receive array allocation:
12861 * All RcvArray entries are divided into groups of 8. This
12862 * is required by the hardware and will speed up writes to
12863 * consecutive entries by using write-combining of the entire
12864 * cacheline.
12865 *
12866	 * The groups are evenly divided among all contexts.
12867	 * Any leftover groups will be given to the first N user
12868 * contexts.
12869 */
12870 dd->rcv_entries.group_size = RCV_INCREMENT;
12871 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12872 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12873 dd->rcv_entries.nctxt_extra = ngroups -
12874 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12875 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12876 dd->rcv_entries.ngroups,
12877 dd->rcv_entries.nctxt_extra);
12878 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12879 MAX_EAGER_ENTRIES * 2) {
12880 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12881 dd->rcv_entries.group_size;
12882 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012883 "RcvArray group count too high, change to %u\n",
12884 dd->rcv_entries.ngroups);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012885 dd->rcv_entries.nctxt_extra = 0;
12886 }
12887 /*
12888 * PIO send contexts
12889 */
12890 ret = init_sc_pools_and_sizes(dd);
12891 if (ret >= 0) { /* success */
12892 dd->num_send_contexts = ret;
12893 dd_dev_info(
12894 dd,
Jianxin Xiong44306f12016-04-12 11:30:28 -070012895 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040012896 dd->chip_send_contexts,
12897 dd->num_send_contexts,
12898 dd->sc_sizes[SC_KERNEL].count,
12899 dd->sc_sizes[SC_ACK].count,
Jianxin Xiong44306f12016-04-12 11:30:28 -070012900 dd->sc_sizes[SC_USER].count,
12901 dd->sc_sizes[SC_VL15].count);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012902 ret = 0; /* success */
12903 }
12904
12905 return ret;
12906}
12907
12908/*
12909 * Set the device/port partition key table. The MAD code
12910 * will ensure that, at least, the partial management
12911 * partition key is present in the table.
12912 */
12913static void set_partition_keys(struct hfi1_pportdata *ppd)
12914{
12915 struct hfi1_devdata *dd = ppd->dd;
12916 u64 reg = 0;
12917 int i;
12918
12919 dd_dev_info(dd, "Setting partition keys\n");
12920 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12921 reg |= (ppd->pkeys[i] &
12922 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12923 ((i % 4) *
12924 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12925 /* Each register holds 4 PKey values. */
12926 if ((i % 4) == 3) {
12927 write_csr(dd, RCV_PARTITION_KEY +
12928 ((i - 3) * 2), reg);
12929 reg = 0;
12930 }
12931 }
12932
12933 /* Always enable HW pkeys check when pkeys table is set */
12934 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12935}
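
/*
 * Packing example: pkeys are 16 bits and four fit per 64-bit CSR, so
 * pkeys[4..7] accumulate in reg and are flushed when i == 7 to
 * RCV_PARTITION_KEY + ((7 - 3) * 2) = RCV_PARTITION_KEY + 8.
 */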
12936
12937/*
12938 * These CSRs and memories are uninitialized on reset and must be
12939 * written before reading to set the ECC/parity bits.
12940 *
12941	 * NOTE: All user context CSRs that are not mmapped write-only
12942 * (e.g. the TID flows) must be initialized even if the driver never
12943 * reads them.
12944 */
12945static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12946{
12947 int i, j;
12948
12949 /* CceIntMap */
12950 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012951 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012952
12953 /* SendCtxtCreditReturnAddr */
12954 for (i = 0; i < dd->chip_send_contexts; i++)
12955 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12956
12957 /* PIO Send buffers */
12958 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080012959 /*
12960 * These are not normally read, and (presently) have no method
12961 * to be read, so are not pre-initialized
12962 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012963
12964 /* RcvHdrAddr */
12965 /* RcvHdrTailAddr */
12966 /* RcvTidFlowTable */
12967 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12968 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12969 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12970 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080012971 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012972 }
12973
12974 /* RcvArray */
12975 for (i = 0; i < dd->chip_rcv_array_count; i++)
Jubin John8638b772016-02-14 20:19:24 -080012976 write_csr(dd, RCV_ARRAY + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080012977 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012978
12979 /* RcvQPMapTable */
12980 for (i = 0; i < 32; i++)
12981 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12982}
12983
12984/*
12985 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12986 */
12987static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12988 u64 ctrl_bits)
12989{
12990 unsigned long timeout;
12991 u64 reg;
12992
12993 /* is the condition present? */
12994 reg = read_csr(dd, CCE_STATUS);
12995 if ((reg & status_bits) == 0)
12996 return;
12997
12998 /* clear the condition */
12999 write_csr(dd, CCE_CTRL, ctrl_bits);
13000
13001 /* wait for the condition to clear */
13002 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13003 while (1) {
13004 reg = read_csr(dd, CCE_STATUS);
13005 if ((reg & status_bits) == 0)
13006 return;
13007 if (time_after(jiffies, timeout)) {
13008 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013009 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13010 status_bits, reg & status_bits);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013011 return;
13012 }
13013 udelay(1);
13014 }
13015}
13016
13017/* set CCE CSRs to chip reset defaults */
13018static void reset_cce_csrs(struct hfi1_devdata *dd)
13019{
13020 int i;
13021
13022 /* CCE_REVISION read-only */
13023 /* CCE_REVISION2 read-only */
13024 /* CCE_CTRL - bits clear automatically */
13025 /* CCE_STATUS read-only, use CceCtrl to clear */
13026 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13027 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13028 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13029 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13030 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13031 /* CCE_ERR_STATUS read-only */
13032 write_csr(dd, CCE_ERR_MASK, 0);
13033 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13034 /* CCE_ERR_FORCE leave alone */
13035 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13036 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13037 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13038 /* CCE_PCIE_CTRL leave alone */
13039 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13040 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13041 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013042 CCE_MSIX_TABLE_UPPER_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013043 }
13044 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13045 /* CCE_MSIX_PBA read-only */
13046 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13047 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13048 }
13049 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13050 write_csr(dd, CCE_INT_MAP, 0);
13051 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13052 /* CCE_INT_STATUS read-only */
13053 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13054 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13055 /* CCE_INT_FORCE leave alone */
13056 /* CCE_INT_BLOCKED read-only */
13057 }
13058 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13059 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13060}
13061
Mike Marciniszyn77241052015-07-30 15:17:43 -040013062/* set MISC CSRs to chip reset defaults */
13063static void reset_misc_csrs(struct hfi1_devdata *dd)
13064{
13065 int i;
13066
13067 for (i = 0; i < 32; i++) {
13068 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13069 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13070 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13071 }
Jubin John4d114fd2016-02-14 20:21:43 -080013072 /*
13073 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13074 * only be written 128-byte chunks
13075 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013076 /* init RSA engine to clear lingering errors */
13077 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13078 write_csr(dd, MISC_CFG_RSA_MU, 0);
13079 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13080 /* MISC_STS_8051_DIGEST read-only */
13081 /* MISC_STS_SBM_DIGEST read-only */
13082 /* MISC_STS_PCIE_DIGEST read-only */
13083 /* MISC_STS_FAB_DIGEST read-only */
13084 /* MISC_ERR_STATUS read-only */
13085 write_csr(dd, MISC_ERR_MASK, 0);
13086 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13087 /* MISC_ERR_FORCE leave alone */
13088}
13089
13090/* set TXE CSRs to chip reset defaults */
13091static void reset_txe_csrs(struct hfi1_devdata *dd)
13092{
13093 int i;
13094
13095 /*
13096 * TXE Kernel CSRs
13097 */
13098 write_csr(dd, SEND_CTRL, 0);
13099 __cm_reset(dd, 0); /* reset CM internal state */
13100 /* SEND_CONTEXTS read-only */
13101 /* SEND_DMA_ENGINES read-only */
13102 /* SEND_PIO_MEM_SIZE read-only */
13103 /* SEND_DMA_MEM_SIZE read-only */
13104 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13105 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13106 /* SEND_PIO_ERR_STATUS read-only */
13107 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13108 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13109 /* SEND_PIO_ERR_FORCE leave alone */
13110 /* SEND_DMA_ERR_STATUS read-only */
13111 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13112 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13113 /* SEND_DMA_ERR_FORCE leave alone */
13114 /* SEND_EGRESS_ERR_STATUS read-only */
13115 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13116 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13117 /* SEND_EGRESS_ERR_FORCE leave alone */
13118 write_csr(dd, SEND_BTH_QP, 0);
13119 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13120 write_csr(dd, SEND_SC2VLT0, 0);
13121 write_csr(dd, SEND_SC2VLT1, 0);
13122 write_csr(dd, SEND_SC2VLT2, 0);
13123 write_csr(dd, SEND_SC2VLT3, 0);
13124 write_csr(dd, SEND_LEN_CHECK0, 0);
13125 write_csr(dd, SEND_LEN_CHECK1, 0);
13126 /* SEND_ERR_STATUS read-only */
13127 write_csr(dd, SEND_ERR_MASK, 0);
13128 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13129 /* SEND_ERR_FORCE read-only */
13130 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013131 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013132 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013133 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13134 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13135 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013136 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013137 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013138 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013139 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013140 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
Jubin John17fb4f22016-02-14 20:21:52 -080013141 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013142 /* SEND_CM_CREDIT_USED_STATUS read-only */
13143 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13144 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13145 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13146 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13147 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13148 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013149 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013150 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13151 /* SEND_CM_CREDIT_USED_VL read-only */
13152 /* SEND_CM_CREDIT_USED_VL15 read-only */
13153 /* SEND_EGRESS_CTXT_STATUS read-only */
13154 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13155 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13156 /* SEND_EGRESS_ERR_INFO read-only */
13157 /* SEND_EGRESS_ERR_SOURCE read-only */
13158
13159 /*
13160 * TXE Per-Context CSRs
13161 */
13162 for (i = 0; i < dd->chip_send_contexts; i++) {
13163 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13164 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13165 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13166 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13167 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13168 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13169 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13170 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13171 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13172 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13173 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13174 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13175 }
13176
13177 /*
13178 * TXE Per-SDMA CSRs
13179 */
13180 for (i = 0; i < dd->chip_sdma_engines; i++) {
13181 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13182 /* SEND_DMA_STATUS read-only */
13183 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13184 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13185 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13186 /* SEND_DMA_HEAD read-only */
13187 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13188 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13189 /* SEND_DMA_IDLE_CNT read-only */
13190 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13191 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13192 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13193 /* SEND_DMA_ENG_ERR_STATUS read-only */
13194 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13195 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13196 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13197 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13198 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13199 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13200 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13201 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13202 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13203 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13204 }
13205}
13206
13207/*
13208 * Expect on entry:
13209 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13210 */
13211static void init_rbufs(struct hfi1_devdata *dd)
13212{
13213 u64 reg;
13214 int count;
13215
13216 /*
13217 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13218 * clear.
13219 */
13220 count = 0;
13221 while (1) {
13222 reg = read_csr(dd, RCV_STATUS);
13223 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13224 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13225 break;
13226 /*
13227 * Give up after 1ms - maximum wait time.
13228 *
13229 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13230 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13231 * 148 KB / (66% * 250MB/s) = 920us
13232 */
13233 if (count++ > 500) {
13234 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013235 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13236 __func__, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013237 break;
13238 }
13239 udelay(2); /* do not busy-wait the CSR */
13240 }
13241
13242 /* start the init - expect RcvCtrl to be 0 */
13243 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13244
13245 /*
13246 * Read to force the write of Rcvtrl.RxRbufInit. There is a brief
13247 * period after the write before RcvStatus.RxRbufInitDone is valid.
13248 * The delay in the first run through the loop below is sufficient and
13249	 * required before the first read of RcvStatus.RxRbufInitDone.
13250 */
13251 read_csr(dd, RCV_CTRL);
13252
13253 /* wait for the init to finish */
13254 count = 0;
13255 while (1) {
13256 /* delay is required first time through - see above */
13257 udelay(2); /* do not busy-wait the CSR */
13258 reg = read_csr(dd, RCV_STATUS);
13259 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13260 break;
13261
13262 /* give up after 100us - slowest possible at 33MHz is 73us */
13263 if (count++ > 50) {
13264 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013265 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13266 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013267 break;
13268 }
13269 }
13270}
13271
13272/* set RXE CSRs to chip reset defaults */
13273static void reset_rxe_csrs(struct hfi1_devdata *dd)
13274{
13275 int i, j;
13276
13277 /*
13278 * RXE Kernel CSRs
13279 */
13280 write_csr(dd, RCV_CTRL, 0);
13281 init_rbufs(dd);
13282 /* RCV_STATUS read-only */
13283 /* RCV_CONTEXTS read-only */
13284 /* RCV_ARRAY_CNT read-only */
13285 /* RCV_BUF_SIZE read-only */
13286 write_csr(dd, RCV_BTH_QP, 0);
13287 write_csr(dd, RCV_MULTICAST, 0);
13288 write_csr(dd, RCV_BYPASS, 0);
13289 write_csr(dd, RCV_VL15, 0);
13290 /* this is a clear-down */
13291 write_csr(dd, RCV_ERR_INFO,
Jubin John17fb4f22016-02-14 20:21:52 -080013292 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013293 /* RCV_ERR_STATUS read-only */
13294 write_csr(dd, RCV_ERR_MASK, 0);
13295 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13296 /* RCV_ERR_FORCE leave alone */
13297 for (i = 0; i < 32; i++)
13298 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13299 for (i = 0; i < 4; i++)
13300 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13301 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13302 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13303 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13304 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13305 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13306 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13307 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13308 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13309 }
13310 for (i = 0; i < 32; i++)
13311 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13312
13313 /*
13314 * RXE Kernel and User Per-Context CSRs
13315 */
13316 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13317 /* kernel */
13318 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13319 /* RCV_CTXT_STATUS read-only */
13320 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13321 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13322 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13323 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13324 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13325 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13326 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13327 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13328 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13329 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13330
13331 /* user */
13332 /* RCV_HDR_TAIL read-only */
13333 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13334 /* RCV_EGR_INDEX_TAIL read-only */
13335 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13336 /* RCV_EGR_OFFSET_TAIL read-only */
13337 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
Jubin John17fb4f22016-02-14 20:21:52 -080013338 write_uctxt_csr(dd, i,
13339 RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013340 }
13341 }
13342}
13343
13344/*
13345 * Set sc2vl tables.
13346 *
13347 * They power on to zeros, so to avoid send context errors
13348 * they need to be set:
13349 *
13350 * SC 0-7 -> VL 0-7 (respectively)
13351 * SC 15 -> VL 15
13352 * otherwise
13353 * -> VL 0
13354 */
13355static void init_sc2vl_tables(struct hfi1_devdata *dd)
13356{
13357 int i;
13358 /* init per architecture spec, constrained by hardware capability */
13359
13360 /* HFI maps sent packets */
13361 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13362 0,
13363 0, 0, 1, 1,
13364 2, 2, 3, 3,
13365 4, 4, 5, 5,
13366 6, 6, 7, 7));
13367 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13368 1,
13369 8, 0, 9, 0,
13370 10, 0, 11, 0,
13371 12, 0, 13, 0,
13372 14, 0, 15, 15));
13373 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13374 2,
13375 16, 0, 17, 0,
13376 18, 0, 19, 0,
13377 20, 0, 21, 0,
13378 22, 0, 23, 0));
13379 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13380 3,
13381 24, 0, 25, 0,
13382 26, 0, 27, 0,
13383 28, 0, 29, 0,
13384 30, 0, 31, 0));
13385
13386 /* DC maps received packets */
13387 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13388 15_0,
13389 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13390 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13391 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13392 31_16,
13393 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13394 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13395
13396 /* initialize the cached sc2vl values consistently with h/w */
13397 for (i = 0; i < 32; i++) {
13398 if (i < 8 || i == 15)
13399 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13400 else
13401 *((u8 *)(dd->sc2vl) + i) = 0;
13402 }
13403}
13404
13405/*
13406 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13407 * depend on the chip going through a power-on reset - a driver may be loaded
13408 * and unloaded many times.
13409 *
13410 * Do not write any CSR values to the chip in this routine - there may be
13411 * a reset following the (possible) FLR in this routine.
13412 *
13413 */
13414static void init_chip(struct hfi1_devdata *dd)
13415{
13416 int i;
13417
13418 /*
13419 * Put the HFI CSRs in a known state.
13420 * Combine this with a DC reset.
13421 *
13422 * Stop the device from doing anything while we do a
13423 * reset. We know there are no other active users of
13424 * the device since we are now in charge. Turn off
13425	 * all outbound and inbound traffic and make sure
13426 * the device does not generate any interrupts.
13427 */
13428
13429 /* disable send contexts and SDMA engines */
13430 write_csr(dd, SEND_CTRL, 0);
13431 for (i = 0; i < dd->chip_send_contexts; i++)
13432 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13433 for (i = 0; i < dd->chip_sdma_engines; i++)
13434 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13435 /* disable port (turn off RXE inbound traffic) and contexts */
13436 write_csr(dd, RCV_CTRL, 0);
13437 for (i = 0; i < dd->chip_rcv_contexts; i++)
13438 write_csr(dd, RCV_CTXT_CTRL, 0);
13439 /* mask all interrupt sources */
13440 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013441 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013442
13443 /*
13444 * DC Reset: do a full DC reset before the register clear.
13445 * A recommended length of time to hold is one CSR read,
13446 * so reread the CceDcCtrl. Then, hold the DC in reset
13447 * across the clear.
13448 */
13449 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013450 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013451
13452 if (use_flr) {
13453 /*
13454 * A FLR will reset the SPC core and part of the PCIe.
13455 * The parts that need to be restored have already been
13456 * saved.
13457 */
13458 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13459
13460 /* do the FLR, the DC reset will remain */
13461 hfi1_pcie_flr(dd);
13462
13463 /* restore command and BARs */
13464 restore_pci_variables(dd);
13465
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013466 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013467 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13468 hfi1_pcie_flr(dd);
13469 restore_pci_variables(dd);
13470 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013471 } else {
13472 dd_dev_info(dd, "Resetting CSRs with writes\n");
13473 reset_cce_csrs(dd);
13474 reset_txe_csrs(dd);
13475 reset_rxe_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013476 reset_misc_csrs(dd);
13477 }
13478 /* clear the DC reset */
13479 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013480
Mike Marciniszyn77241052015-07-30 15:17:43 -040013481 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013482 setextled(dd, 0);
13483
Mike Marciniszyn77241052015-07-30 15:17:43 -040013484 /*
13485 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013486 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013487 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013488	 * anything plugged in constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013489 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013490 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013491 * I2CCLK and I2CDAT will change per direction, and INT_N and
13492 * MODPRS_N are input only and their value is ignored.
13493 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013494 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13495 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Dean Luicka2ee27a2016-03-05 08:49:50 -080013496 init_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013497}
13498
13499static void init_early_variables(struct hfi1_devdata *dd)
13500{
13501 int i;
13502
13503 /* assign link credit variables */
13504 dd->vau = CM_VAU;
13505 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013506 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013507 dd->link_credits--;
13508 dd->vcu = cu_to_vcu(hfi1_cu);
13509 /* enough room for 8 MAD packets plus header - 17K */
13510 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13511 if (dd->vl15_init > dd->link_credits)
13512 dd->vl15_init = dd->link_credits;
13513
13514 write_uninitialized_csrs_and_memories(dd);
13515
13516 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13517 for (i = 0; i < dd->num_pports; i++) {
13518 struct hfi1_pportdata *ppd = &dd->pport[i];
13519
13520 set_partition_keys(ppd);
13521 }
13522 init_sc2vl_tables(dd);
13523}
13524
13525static void init_kdeth_qp(struct hfi1_devdata *dd)
13526{
13527 /* user changed the KDETH_QP */
13528 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13529 /* out of range or illegal value */
13530 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13531 kdeth_qp = 0;
13532 }
13533 if (kdeth_qp == 0) /* not set, or failed range check */
13534 kdeth_qp = DEFAULT_KDETH_QP;
13535
13536 write_csr(dd, SEND_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013537 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13538 SEND_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013539
13540 write_csr(dd, RCV_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013541 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13542 RCV_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013543}
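
/*
 * For example, assuming DEFAULT_KDETH_QP is 0x80, any packet whose
 * BTH QP[23:16] equals 0x80 is treated as a KDETH packet, i.e. QPNs
 * 0x800000 through 0x80ffff.
 */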
13544
13545/**
13546 * init_qpmap_table
13547 * @dd - device data
13548 * @first_ctxt - first context
13549	 * @last_ctxt - last context
13550 *
13551	 * This routine sets the qpn mapping table that
13552 * is indexed by qpn[8:1].
13553 *
13554 * The routine will round robin the 256 settings
13555 * from first_ctxt to last_ctxt.
13556 *
13557 * The first/last looks ahead to having specialized
13558 * receive contexts for mgmt and bypass. Normal
13559	 * verbs traffic is assumed to be on a range
13560 * of receive contexts.
13561 */
13562static void init_qpmap_table(struct hfi1_devdata *dd,
13563 u32 first_ctxt,
13564 u32 last_ctxt)
13565{
13566 u64 reg = 0;
13567 u64 regno = RCV_QP_MAP_TABLE;
13568 int i;
13569 u64 ctxt = first_ctxt;
13570
Dean Luick60d585ad2016-04-12 10:50:35 -070013571 for (i = 0; i < 256; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013572 reg |= ctxt << (8 * (i % 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -040013573 ctxt++;
13574 if (ctxt > last_ctxt)
13575 ctxt = first_ctxt;
Dean Luick60d585ad2016-04-12 10:50:35 -070013576 if (i % 8 == 7) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013577 write_csr(dd, regno, reg);
13578 reg = 0;
13579 regno += 8;
13580 }
13581 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013582
13583 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13584 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13585}
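
/*
 * Example: with first_ctxt = 2 and last_ctxt = 4, the 256 entries
 * cycle 2, 3, 4, 2, 3, 4, ... spreading QPs round robin across the
 * three contexts; eight 8-bit entries are packed per CSR write.
 */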
13586
Dean Luick372cc85a2016-04-12 11:30:51 -070013587struct rsm_map_table {
13588 u64 map[NUM_MAP_REGS];
13589 unsigned int used;
13590};
13591
Dean Luickb12349a2016-04-12 11:31:33 -070013592struct rsm_rule_data {
13593 u8 offset;
13594 u8 pkt_type;
13595 u32 field1_off;
13596 u32 field2_off;
13597 u32 index1_off;
13598 u32 index1_width;
13599 u32 index2_off;
13600 u32 index2_width;
13601 u32 mask1;
13602 u32 value1;
13603 u32 mask2;
13604 u32 value2;
13605};
13606
Dean Luick372cc85a2016-04-12 11:30:51 -070013607/*
13608 * Return an initialized RMT map table for users to fill in. OK if it
13609 * returns NULL, indicating no table.
13610 */
13611static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13612{
13613 struct rsm_map_table *rmt;
13614	u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is the default on A-step silicon */
13615
13616 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13617 if (rmt) {
13618 memset(rmt->map, rxcontext, sizeof(rmt->map));
13619 rmt->used = 0;
13620 }
13621
13622 return rmt;
13623}
13624
13625/*
13626 * Write the final RMT map table to the chip and free the table. OK if
13627 * table is NULL.
13628 */
13629static void complete_rsm_map_table(struct hfi1_devdata *dd,
13630 struct rsm_map_table *rmt)
13631{
13632 int i;
13633
13634 if (rmt) {
13635 /* write table to chip */
13636 for (i = 0; i < NUM_MAP_REGS; i++)
13637 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13638
13639 /* enable RSM */
13640 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13641 }
13642}
13643
Dean Luickb12349a2016-04-12 11:31:33 -070013644/*
13645 * Add a receive side mapping rule.
13646 */
13647static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13648 struct rsm_rule_data *rrd)
13649{
13650 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13651 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13652 1ull << rule_index | /* enable bit */
13653 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13654 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13655 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13656 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13657 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13658 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13659 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13660 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13661 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13662 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13663 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13664 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13665 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13666}
13667
Dean Luick4a818be2016-04-12 11:31:11 -070013668/* return the number of RSM map table entries that will be used for QOS */
13669static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13670 unsigned int *np)
13671{
13672 int i;
13673 unsigned int m, n;
13674 u8 max_by_vl = 0;
13675
13676 /* is QOS active at all? */
13677 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13678 num_vls == 1 ||
13679 krcvqsset <= 1)
13680 goto no_qos;
13681
13682 /* determine bits for qpn */
13683 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13684 if (krcvqs[i] > max_by_vl)
13685 max_by_vl = krcvqs[i];
13686 if (max_by_vl > 32)
13687 goto no_qos;
13688 m = ilog2(__roundup_pow_of_two(max_by_vl));
13689
13690 /* determine bits for vl */
13691 n = ilog2(__roundup_pow_of_two(num_vls));
13692
13693 /* reject if too much is used */
13694 if ((m + n) > 7)
13695 goto no_qos;
13696
13697 if (mp)
13698 *mp = m;
13699 if (np)
13700 *np = n;
13701
13702 return 1 << (m + n);
13703
13704no_qos:
13705 if (mp)
13706 *mp = 0;
13707 if (np)
13708 *np = 0;
13709 return 0;
13710}
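
/*
 * Worked example: krcvqs = {4, 4, 4, 4} with num_vls = 4 gives
 * max_by_vl = 4, so m = 2 qpn bits and n = 2 vl bits, and QOS
 * consumes 1 << (2 + 2) = 16 RSM map table entries.
 */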
13711
Mike Marciniszyn77241052015-07-30 15:17:43 -040013712/**
13713 * init_qos - init RX qos
13714 * @dd - device data
Dean Luick372cc85a2016-04-12 11:30:51 -070013715 * @rmt - RSM map table
Mike Marciniszyn77241052015-07-30 15:17:43 -040013716 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013717 * This routine initializes Rule 0 and the RSM map table to implement
13718 * quality of service (qos).
Mike Marciniszyn77241052015-07-30 15:17:43 -040013719 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013720 * If all of the limit tests succeed, qos is applied based on the array
13721 * interpretation of krcvqs where entry 0 is VL0.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013722 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013723 * The number of vl bits (n) and the number of qpn bits (m) are computed to
13724 * feed both the RSM map table and the single rule.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013725 */
Dean Luick372cc85a2016-04-12 11:30:51 -070013726static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013727{
Dean Luickb12349a2016-04-12 11:31:33 -070013728 struct rsm_rule_data rrd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013729 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
Dean Luick372cc85a2016-04-12 11:30:51 -070013730 unsigned int rmt_entries;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013731 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013732
Dean Luick4a818be2016-04-12 11:31:11 -070013733 if (!rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013734 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013735 rmt_entries = qos_rmt_entries(dd, &m, &n);
13736 if (rmt_entries == 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013737 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013738 qpns_per_vl = 1 << m;
13739
Dean Luick372cc85a2016-04-12 11:30:51 -070013740 /* enough room in the map table? */
13741 rmt_entries = 1 << (m + n);
13742 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013743 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013744
Dean Luick372cc85a2016-04-12 11:30:51 -070013745	/* add qos entries to the RSM map table */
Dean Luick33a9eb52016-04-12 10:50:22 -070013746 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013747 unsigned tctxt;
13748
13749 for (qpn = 0, tctxt = ctxt;
13750 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13751 unsigned idx, regoff, regidx;
13752
Dean Luick372cc85a2016-04-12 11:30:51 -070013753 /* generate the index the hardware will produce */
13754 idx = rmt->used + ((qpn << n) ^ i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013755 regoff = (idx % 8) * 8;
13756 regidx = idx / 8;
Dean Luick372cc85a2016-04-12 11:30:51 -070013757 /* replace default with context number */
13758 reg = rmt->map[regidx];
Mike Marciniszyn77241052015-07-30 15:17:43 -040013759 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13760 << regoff);
13761 reg |= (u64)(tctxt++) << regoff;
Dean Luick372cc85a2016-04-12 11:30:51 -070013762 rmt->map[regidx] = reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013763 if (tctxt == ctxt + krcvqs[i])
13764 tctxt = ctxt;
13765 }
13766 ctxt += krcvqs[i];
13767 }
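	/*
	 * Scatter sketch (hypothetical n = 1, m = 2, so qpns_per_vl = 4):
	 * idx = rmt->used + ((qpn << n) ^ i) places VL0 (i = 0) at entries
	 * rmt->used + {0, 2, 4, 6} and VL1 (i = 1) at rmt->used +
	 * {1, 3, 5, 7}, interleaving the VLs across the claimed block.
	 */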
Dean Luickb12349a2016-04-12 11:31:33 -070013768
13769 rrd.offset = rmt->used;
13770 rrd.pkt_type = 2;
13771 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13772 rrd.field2_off = LRH_SC_MATCH_OFFSET;
13773 rrd.index1_off = LRH_SC_SELECT_OFFSET;
13774 rrd.index1_width = n;
13775 rrd.index2_off = QPN_SELECT_OFFSET;
13776 rrd.index2_width = m + n;
13777 rrd.mask1 = LRH_BTH_MASK;
13778 rrd.value1 = LRH_BTH_VALUE;
13779 rrd.mask2 = LRH_SC_MASK;
13780 rrd.value2 = LRH_SC_VALUE;
13781
13782 /* add rule 0 */
13783 add_rsm_rule(dd, 0, &rrd);
13784
Dean Luick372cc85a2016-04-12 11:30:51 -070013785 /* mark RSM map entries as used */
13786 rmt->used += rmt_entries;
Dean Luick33a9eb52016-04-12 10:50:22 -070013787 /* map everything else to the mcast/err/vl15 context */
13788 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013789 dd->qos_shift = n + 1;
13790 return;
13791bail:
13792 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013793 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013794}
13795
Dean Luick8f000f72016-04-12 11:32:06 -070013796static void init_user_fecn_handling(struct hfi1_devdata *dd,
13797 struct rsm_map_table *rmt)
13798{
13799 struct rsm_rule_data rrd;
13800 u64 reg;
13801 int i, idx, regoff, regidx;
13802 u8 offset;
13803
13804 /* there needs to be enough room in the map table */
13805 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13806 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13807 return;
13808 }
13809
13810 /*
13811 * RSM will extract the destination context as an index into the
13812 * map table. The destination contexts are a sequential block
13813 * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
13814 * Map entries are accessed as offset + extracted value. Adjust
13815 * the added offset so this sequence can be placed anywhere in
13816 * the table - as long as the entries themselves do not wrap.
13817 * There are only enough bits in offset for the table size, so
13818 * start with that to allow for a "negative" offset.
13819 */
13820 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13821 (int)dd->first_user_ctxt);
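	/*
	 * A sketch of the wraparound (hypothetical numbers, assuming the
	 * table holds 256 entries): with rmt->used = 16 and
	 * first_user_ctxt = 12, offset = (u8)(256 + 16 - 12) = 4, so a
	 * packet destined for context c selects map entry
	 * (4 + c) mod 256 = 16 + (c - 12), i.e. the block claimed below.
	 */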
13822
13823 for (i = dd->first_user_ctxt, idx = rmt->used;
13824 i < dd->num_rcv_contexts; i++, idx++) {
13825 /* replace with identity mapping */
13826 regoff = (idx % 8) * 8;
13827 regidx = idx / 8;
13828 reg = rmt->map[regidx];
13829 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13830 reg |= (u64)i << regoff;
13831 rmt->map[regidx] = reg;
13832 }
13833
13834 /*
13835 * For RSM intercept of Expected FECN packets:
13836 * o packet type 0 - expected
13837 * o match on F (bit 95), using select/match 1, and
13838 * o match on SH (bit 133), using select/match 2.
13839 *
13840 * Use index 1 to extract the 8-bit receive context from DestQP
13841 * (start at bit 64). Use that as the RSM map table index.
13842 */
13843 rrd.offset = offset;
13844 rrd.pkt_type = 0;
13845 rrd.field1_off = 95;
13846 rrd.field2_off = 133;
13847 rrd.index1_off = 64;
13848 rrd.index1_width = 8;
13849 rrd.index2_off = 0;
13850 rrd.index2_width = 0;
13851 rrd.mask1 = 1;
13852 rrd.value1 = 1;
13853 rrd.mask2 = 1;
13854 rrd.value2 = 1;
13855
13856 /* add rule 1 */
13857 add_rsm_rule(dd, 1, &rrd);
13858
13859 rmt->used += dd->num_user_contexts;
13860}
13861
Mike Marciniszyn77241052015-07-30 15:17:43 -040013862static void init_rxe(struct hfi1_devdata *dd)
13863{
Dean Luick372cc85a2016-04-12 11:30:51 -070013864 struct rsm_map_table *rmt;
13865
Mike Marciniszyn77241052015-07-30 15:17:43 -040013866 /* enable all receive errors */
13867 write_csr(dd, RCV_ERR_MASK, ~0ull);
Dean Luick372cc85a2016-04-12 11:30:51 -070013868
13869 rmt = alloc_rsm_map_table(dd);
13870 /* set up QOS, including the QPN map table */
13871 init_qos(dd, rmt);
Dean Luick8f000f72016-04-12 11:32:06 -070013872 init_user_fecn_handling(dd, rmt);
Dean Luick372cc85a2016-04-12 11:30:51 -070013873 complete_rsm_map_table(dd, rmt);
13874 kfree(rmt);
13875
Mike Marciniszyn77241052015-07-30 15:17:43 -040013876 /*
13877 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13878 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13879 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13880 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13881 * Max_PayLoad_Size set to its minimum of 128.
13882 *
13883 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13884 * (64 bytes). Max_Payload_Size is possibly modified upward in
13885 * tune_pcie_caps() which is called after this routine.
13886 */
13887}
13888
13889static void init_other(struct hfi1_devdata *dd)
13890{
13891 /* enable all CCE errors */
13892 write_csr(dd, CCE_ERR_MASK, ~0ull);
13893 /* enable *some* Misc errors */
13894 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13895 /* enable all DC errors, except LCB */
13896 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13897 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13898}
13899
13900/*
13901 * Fill out the given AU table using the given CU. A CU is defined in terms
13902 * of AUs. The table is an encoding: given the index, how many AUs does that
13903 * represent?
13904 *
13905 * NOTE: Assumes that the register layout is the same for the
13906 * local and remote tables.
13907 */
13908static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13909 u32 csr0to3, u32 csr4to7)
13910{
13911 write_csr(dd, csr0to3,
Jubin John17fb4f22016-02-14 20:21:52 -080013912 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13913 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13914 2ull * cu <<
13915 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13916 4ull * cu <<
13917 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013918 write_csr(dd, csr4to7,
Jubin John17fb4f22016-02-14 20:21:52 -080013919 8ull * cu <<
13920 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13921 16ull * cu <<
13922 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13923 32ull * cu <<
13924 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13925 64ull * cu <<
13926 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013927}
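/*
 * Resulting encoding, read directly from the shifts above: table
 * index -> AU count is { 0, 1, 2*CU, 4*CU, 8*CU, 16*CU, 32*CU, 64*CU }.
 * For example, with CU = 2 the table reads { 0, 1, 4, 8, 16, 32, 64, 128 }.
 */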
13928
13929static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13930{
13931 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080013932 SEND_CM_LOCAL_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013933}
13934
13935void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13936{
13937 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080013938 SEND_CM_REMOTE_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013939}
13940
13941static void init_txe(struct hfi1_devdata *dd)
13942{
13943 int i;
13944
13945 /* enable all PIO, SDMA, general, and Egress errors */
13946 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13947 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13948 write_csr(dd, SEND_ERR_MASK, ~0ull);
13949 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13950
13951 /* enable all per-context and per-SDMA engine errors */
13952 for (i = 0; i < dd->chip_send_contexts; i++)
13953 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13954 for (i = 0; i < dd->chip_sdma_engines; i++)
13955 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13956
13957 /* set the local CU to AU mapping */
13958 assign_local_cm_au_table(dd, dd->vcu);
13959
13960 /*
13961 * Set reasonable default for Credit Return Timer
13962 * Don't set on Simulator - causes it to choke.
13963 */
13964 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13965 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13966}
13967
13968int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13969{
13970 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13971 unsigned sctxt;
13972 int ret = 0;
13973 u64 reg;
13974
13975 if (!rcd || !rcd->sc) {
13976 ret = -EINVAL;
13977 goto done;
13978 }
13979 sctxt = rcd->sc->hw_context;
13980 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13981 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13982 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13983 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13984 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13985 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13986 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13987 /*
13988 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040013989 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013990 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013991 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13992 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13993 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13994 }
13995
13996 /* Enable J_KEY check on receive context. */
13997 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13998 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13999 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14000 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
14001done:
14002 return ret;
14003}
14004
14005int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14006{
14007 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14008 unsigned sctxt;
14009 int ret = 0;
14010 u64 reg;
14011
14012 if (!rcd || !rcd->sc) {
14013 ret = -EINVAL;
14014 goto done;
14015 }
14016 sctxt = rcd->sc->hw_context;
14017 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14018 /*
14019 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14020 * This check would not have been enabled for A0 h/w, see
14021 * set_ctxt_jkey().
14022 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014023 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014024 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14025 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14026 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14027 }
14028 /* Turn off the J_KEY on the receive side */
14029 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14030done:
14031 return ret;
14032}
14033
14034int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14035{
14036 struct hfi1_ctxtdata *rcd;
14037 unsigned sctxt;
14038 int ret = 0;
14039 u64 reg;
14040
Jubin Johne4909742016-02-14 20:22:00 -080014041 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014042 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014043 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014044 ret = -EINVAL;
14045 goto done;
14046 }
14047 if (!rcd || !rcd->sc) {
14048 ret = -EINVAL;
14049 goto done;
14050 }
14051 sctxt = rcd->sc->hw_context;
14052 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14053 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14054 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14055 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14056 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Sebastian Sancheze38d1e42016-04-12 11:22:21 -070014057 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014058 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14059done:
14060 return ret;
14061}
14062
14063int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14064{
14065 struct hfi1_ctxtdata *rcd;
14066 unsigned sctxt;
14067 int ret = 0;
14068 u64 reg;
14069
Jubin Johne4909742016-02-14 20:22:00 -080014070 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014071 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014072 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014073 ret = -EINVAL;
14074 goto done;
14075 }
14076 if (!rcd || !rcd->sc) {
14077 ret = -EINVAL;
14078 goto done;
14079 }
14080 sctxt = rcd->sc->hw_context;
14081 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14082 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14083 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14084 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14085done:
14086 return ret;
14087}
14088
14089/*
14090 * Start cleaning up the chip. Our cleanup happens in multiple
14091 * stages and this is just the first.
14092 */
14093void hfi1_start_cleanup(struct hfi1_devdata *dd)
14094{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080014095 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014096 free_cntrs(dd);
14097 free_rcverr(dd);
14098 clean_up_interrupts(dd);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014099 finish_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014100}
14101
14102#define HFI_BASE_GUID(dev) \
14103 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
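/*
 * The two HFIs on one ASIC are expected to differ only in the
 * GUID_HFI_INDEX bit of their GUIDs, so masking that bit off yields a
 * value shared by both. That is how init_asic_data() below finds its
 * peer device.
 */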
14104
14105/*
Dean Luick78eb1292016-03-05 08:49:45 -080014106 * Information can be shared between the two HFIs on the same ASIC
14107 * in the same OS. This function finds the peer device and sets
14108 * up a shared structure.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014109 */
Dean Luick78eb1292016-03-05 08:49:45 -080014110static int init_asic_data(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014111{
14112 unsigned long flags;
14113 struct hfi1_devdata *tmp, *peer = NULL;
Dean Luick78eb1292016-03-05 08:49:45 -080014114 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014115
14116 spin_lock_irqsave(&hfi1_devs_lock, flags);
14117 /* Find our peer device */
14118 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14119 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14120 dd->unit != tmp->unit) {
14121 peer = tmp;
14122 break;
14123 }
14124 }
14125
Dean Luick78eb1292016-03-05 08:49:45 -080014126 if (peer) {
14127 dd->asic_data = peer->asic_data;
14128 } else {
14129 dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14130 if (!dd->asic_data) {
14131 ret = -ENOMEM;
14132 goto done;
14133 }
14134 mutex_init(&dd->asic_data->asic_resource_mutex);
14135 }
14136 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14137
14138done:
Mike Marciniszyn77241052015-07-30 15:17:43 -040014139 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
Dean Luick78eb1292016-03-05 08:49:45 -080014140 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014141}
14142
Dean Luick5d9157a2015-11-16 21:59:34 -050014143/*
14144 * Set dd->boardname. Use a generic name if a name is not returned from
14145 * EFI variable space.
14146 *
14147 * Return 0 on success, -ENOMEM if space could not be allocated.
14148 */
14149static int obtain_boardname(struct hfi1_devdata *dd)
14150{
14151 /* generic board description */
14152 const char generic[] =
14153 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14154 unsigned long size;
14155 int ret;
14156
14157 ret = read_hfi1_efi_var(dd, "description", &size,
14158 (void **)&dd->boardname);
14159 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014160 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014161 /* use generic description */
14162 dd->boardname = kstrdup(generic, GFP_KERNEL);
14163 if (!dd->boardname)
14164 return -ENOMEM;
14165 }
14166 return 0;
14167}
14168
Kaike Wan24487dd2016-02-26 13:33:23 -080014169/*
14170 * Check the interrupt registers to make sure that they are mapped correctly.
14171 * It is intended to help the user identify any mismapping by the VMM when
14172 * the driver is running in a VM. This function should only be called before
14173 * interrupts are set up properly.
14174 *
14175 * Return 0 on success, -EINVAL on failure.
14176 */
14177static int check_int_registers(struct hfi1_devdata *dd)
14178{
14179 u64 reg;
14180 u64 all_bits = ~(u64)0;
14181 u64 mask;
14182
14183 /* Clear CceIntMask[0] to avoid raising any interrupts */
14184 mask = read_csr(dd, CCE_INT_MASK);
14185 write_csr(dd, CCE_INT_MASK, 0ull);
14186 reg = read_csr(dd, CCE_INT_MASK);
14187 if (reg)
14188 goto err_exit;
14189
14190 /* Clear all interrupt status bits */
14191 write_csr(dd, CCE_INT_CLEAR, all_bits);
14192 reg = read_csr(dd, CCE_INT_STATUS);
14193 if (reg)
14194 goto err_exit;
14195
14196 /* Set all interrupt status bits */
14197 write_csr(dd, CCE_INT_FORCE, all_bits);
14198 reg = read_csr(dd, CCE_INT_STATUS);
14199 if (reg != all_bits)
14200 goto err_exit;
14201
14202 /* Restore the interrupt mask */
14203 write_csr(dd, CCE_INT_CLEAR, all_bits);
14204 write_csr(dd, CCE_INT_MASK, mask);
14205
14206 return 0;
14207err_exit:
14208 write_csr(dd, CCE_INT_MASK, mask);
14209 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14210 return -EINVAL;
14211}
14212
Mike Marciniszyn77241052015-07-30 15:17:43 -040014213/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014214 * hfi1_init_dd - allocate and initialize the device structure for the hfi
Mike Marciniszyn77241052015-07-30 15:17:43 -040014215 * @pdev: the pci_dev for hfi1_ib device
14216 * @ent: pci_device_id struct for this dev
14217 *
14218 * Also allocates, initializes, and returns the devdata struct for this
14219 * device instance
14220 *
14221 * This is global, and is called directly at init to set up the
14222 * chip-specific function pointers for later use.
14223 */
14224struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14225 const struct pci_device_id *ent)
14226{
14227 struct hfi1_devdata *dd;
14228 struct hfi1_pportdata *ppd;
14229 u64 reg;
14230 int i, ret;
14231 static const char * const inames[] = { /* implementation names */
14232 "RTL silicon",
14233 "RTL VCS simulation",
14234 "RTL FPGA emulation",
14235 "Functional simulator"
14236 };
Kaike Wan24487dd2016-02-26 13:33:23 -080014237 struct pci_dev *parent = pdev->bus->self;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014238
Jubin John17fb4f22016-02-14 20:21:52 -080014239 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14240 sizeof(struct hfi1_pportdata));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014241 if (IS_ERR(dd))
14242 goto bail;
14243 ppd = dd->pport;
14244 for (i = 0; i < dd->num_pports; i++, ppd++) {
14245 int vl;
14246 /* init common fields */
14247 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14248 /* DC supports 4 link widths */
14249 ppd->link_width_supported =
14250 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14251 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14252 ppd->link_width_downgrade_supported =
14253 ppd->link_width_supported;
14254 /* start out enabling only 4X */
14255 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14256 ppd->link_width_downgrade_enabled =
14257 ppd->link_width_downgrade_supported;
14258 /* link width active is 0 when link is down */
14259 /* link width downgrade active is 0 when link is down */
14260
Jubin Johnd0d236e2016-02-14 20:20:15 -080014261 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14262 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014263 hfi1_early_err(&pdev->dev,
14264 "Invalid num_vls %u, using %u VLs\n",
14265 num_vls, HFI1_MAX_VLS_SUPPORTED);
14266 num_vls = HFI1_MAX_VLS_SUPPORTED;
14267 }
14268 ppd->vls_supported = num_vls;
14269 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014270 ppd->actual_vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014271 /* Set the default MTU. */
14272 for (vl = 0; vl < num_vls; vl++)
14273 dd->vld[vl].mtu = hfi1_max_mtu;
14274 dd->vld[15].mtu = MAX_MAD_PACKET;
14275 /*
14276 * Set the initial values to reasonable default, will be set
14277 * for real when link is up.
14278 */
14279 ppd->lstate = IB_PORT_DOWN;
14280 ppd->overrun_threshold = 0x4;
14281 ppd->phy_error_threshold = 0xf;
14282 ppd->port_crc_mode_enabled = link_crc_mask;
14283 /* initialize supported LTP CRC mode */
14284 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14285 /* initialize enabled LTP CRC mode */
14286 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14287 /* start in offline */
14288 ppd->host_link_state = HLS_DN_OFFLINE;
14289 init_vl_arb_caches(ppd);
Dean Luickf45c8dc2016-02-03 14:35:31 -080014290 ppd->last_pstate = 0xff; /* invalid value */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014291 }
14292
14293 dd->link_default = HLS_DN_POLL;
14294
14295 /*
14296 * Do remaining PCIe setup and save PCIe values in dd.
14297 * Any error printing is already done by the init code.
14298 * On return, we have the chip mapped.
14299 */
14300 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14301 if (ret < 0)
14302 goto bail_free;
14303
14304 /* verify that reads actually work, save revision for reset check */
14305 dd->revision = read_csr(dd, CCE_REVISION);
14306 if (dd->revision == ~(u64)0) {
14307 dd_dev_err(dd, "cannot read chip CSRs\n");
14308 ret = -EINVAL;
14309 goto bail_cleanup;
14310 }
14311 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14312 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14313 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14314 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14315
Jubin John4d114fd2016-02-14 20:21:43 -080014316 /*
Kaike Wan24487dd2016-02-26 13:33:23 -080014317 * Check the interrupt register mapping if the driver has no access to
14318 * the upstream component. In this case, it is likely that the driver
14319 * is running in a VM.
14320 */
14321 if (!parent) {
14322 ret = check_int_registers(dd);
14323 if (ret)
14324 goto bail_cleanup;
14325 }
14326
14327 /*
Jubin John4d114fd2016-02-14 20:21:43 -080014328 * obtain the hardware ID - NOT related to unit, which is a
14329 * software enumeration
14330 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014331 reg = read_csr(dd, CCE_REVISION2);
14332 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14333 & CCE_REVISION2_HFI_ID_MASK;
14334 /* the variable size will remove unwanted bits */
14335 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14336 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14337 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080014338 dd->icode < ARRAY_SIZE(inames) ?
14339 inames[dd->icode] : "unknown", (int)dd->irev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014340
14341 /* speeds the hardware can support */
14342 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14343 /* speeds allowed to run at */
14344 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14345 /* give a reasonable active value, will be set on link up */
14346 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14347
14348 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14349 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14350 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14351 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14352 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14353 /* fix up link widths for emulation _p */
14354 ppd = dd->pport;
14355 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14356 ppd->link_width_supported =
14357 ppd->link_width_enabled =
14358 ppd->link_width_downgrade_supported =
14359 ppd->link_width_downgrade_enabled =
14360 OPA_LINK_WIDTH_1X;
14361 }
14362	/* ensure num_vls isn't larger than the number of sdma engines */
14363 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14364 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014365 num_vls, dd->chip_sdma_engines);
14366 num_vls = dd->chip_sdma_engines;
14367 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014368 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014369 }
14370
14371 /*
14372 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14373 * Limit the max if larger than the field holds. If timeout is
14374 * non-zero, then the calculated field will be at least 1.
14375 *
14376 * Must be after icode is set up - the cclock rate depends
14377 * on knowing the hardware being used.
14378 */
14379 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14380 if (dd->rcv_intr_timeout_csr >
14381 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14382 dd->rcv_intr_timeout_csr =
14383 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14384 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14385 dd->rcv_intr_timeout_csr = 1;
14386
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014387 /* needs to be done before we look for the peer device */
14388 read_guid(dd);
14389
Dean Luick78eb1292016-03-05 08:49:45 -080014390 /* set up shared ASIC data with peer device */
14391 ret = init_asic_data(dd);
14392 if (ret)
14393 goto bail_cleanup;
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014394
Mike Marciniszyn77241052015-07-30 15:17:43 -040014395 /* obtain chip sizes, reset chip CSRs */
14396 init_chip(dd);
14397
14398 /* read in the PCIe link speed information */
14399 ret = pcie_speeds(dd);
14400 if (ret)
14401 goto bail_cleanup;
14402
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014403 /* Needs to be called before hfi1_firmware_init */
14404 get_platform_config(dd);
14405
Mike Marciniszyn77241052015-07-30 15:17:43 -040014406 /* read in firmware */
14407 ret = hfi1_firmware_init(dd);
14408 if (ret)
14409 goto bail_cleanup;
14410
14411 /*
14412 * In general, the PCIe Gen3 transition must occur after the
14413 * chip has been idled (so it won't initiate any PCIe transactions
14414 * e.g. an interrupt) and before the driver changes any registers
14415 * (the transition will reset the registers).
14416 *
14417 * In particular, place this call after:
14418 * - init_chip() - the chip will not initiate any PCIe transactions
14419 * - pcie_speeds() - reads the current link speed
14420 * - hfi1_firmware_init() - the needed firmware is ready to be
14421 * downloaded
14422 */
14423 ret = do_pcie_gen3_transition(dd);
14424 if (ret)
14425 goto bail_cleanup;
14426
14427 /* start setting dd values and adjusting CSRs */
14428 init_early_variables(dd);
14429
14430 parse_platform_config(dd);
14431
Dean Luick5d9157a2015-11-16 21:59:34 -050014432 ret = obtain_boardname(dd);
14433 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014434 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014435
14436 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014437 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014438 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014439 (u32)dd->majrev,
14440 (u32)dd->minrev,
14441 (dd->revision >> CCE_REVISION_SW_SHIFT)
14442 & CCE_REVISION_SW_MASK);
14443
Jubin John0852d242016-04-12 11:30:08 -070014444 /*
14445 * The real cpu mask is part of the affinity struct but has to be
14446 * initialized earlier than the rest of the affinity struct because it
14447 * is needed to calculate the number of user contexts in
14448 * set_up_context_variables(). However, hfi1_dev_affinity_init(),
14449 * which initializes the rest of the affinity struct members,
14450 * depends on set_up_context_variables() for the number of kernel
14451 * contexts, so it cannot be called before set_up_context_variables().
14452 */
14453 ret = init_real_cpu_mask(dd);
14454 if (ret)
14455 goto bail_cleanup;
14456
Mike Marciniszyn77241052015-07-30 15:17:43 -040014457 ret = set_up_context_variables(dd);
14458 if (ret)
14459 goto bail_cleanup;
14460
14461 /* set initial RXE CSRs */
14462 init_rxe(dd);
14463 /* set initial TXE CSRs */
14464 init_txe(dd);
14465 /* set initial non-RXE, non-TXE CSRs */
14466 init_other(dd);
14467 /* set up KDETH QP prefix in both RX and TX CSRs */
14468 init_kdeth_qp(dd);
14469
Jubin John0852d242016-04-12 11:30:08 -070014470 hfi1_dev_affinity_init(dd);
Mitko Haralanov957558c2016-02-03 14:33:40 -080014471
Mike Marciniszyn77241052015-07-30 15:17:43 -040014472 /* send contexts must be set up before receive contexts */
14473 ret = init_send_contexts(dd);
14474 if (ret)
14475 goto bail_cleanup;
14476
14477 ret = hfi1_create_ctxts(dd);
14478 if (ret)
14479 goto bail_cleanup;
14480
14481 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14482 /*
14483 * rcd[0] is guaranteed to be valid by this point. Also, all
14484 * contexts are using the same value, as per the module parameter.
14485 */
14486 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
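	/*
	 * A sketch of the arithmetic (assuming the RHF occupies the last
	 * 8 bytes of each header queue entry): rcvhdrqentsize is in dwords,
	 * so the RHF starts sizeof(u64) / sizeof(u32) = 2 dwords before the
	 * end of the entry, e.g. a 32-dword entry gives rhf_offset = 30.
	 */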
14487
14488 ret = init_pervl_scs(dd);
14489 if (ret)
14490 goto bail_cleanup;
14491
14492 /* sdma init */
14493 for (i = 0; i < dd->num_pports; ++i) {
14494 ret = sdma_init(dd, i);
14495 if (ret)
14496 goto bail_cleanup;
14497 }
14498
14499 /* use contexts created by hfi1_create_ctxts */
14500 ret = set_up_interrupts(dd);
14501 if (ret)
14502 goto bail_cleanup;
14503
14504 /* set up LCB access - must be after set_up_interrupts() */
14505 init_lcb_access(dd);
14506
14507 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14508 dd->base_guid & 0xFFFFFF);
14509
14510 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14511 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14512 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14513
14514 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14515 if (ret)
14516 goto bail_clear_intr;
14517 check_fabric_firmware_versions(dd);
14518
14519 thermal_init(dd);
14520
14521 ret = init_cntrs(dd);
14522 if (ret)
14523 goto bail_clear_intr;
14524
14525 ret = init_rcverr(dd);
14526 if (ret)
14527 goto bail_free_cntrs;
14528
14529 ret = eprom_init(dd);
14530 if (ret)
14531 goto bail_free_rcverr;
14532
14533 goto bail;
14534
14535bail_free_rcverr:
14536 free_rcverr(dd);
14537bail_free_cntrs:
14538 free_cntrs(dd);
14539bail_clear_intr:
14540 clean_up_interrupts(dd);
14541bail_cleanup:
14542 hfi1_pcie_ddcleanup(dd);
14543bail_free:
14544 hfi1_free_devdata(dd);
14545 dd = ERR_PTR(ret);
14546bail:
14547 return dd;
14548}
14549
14550static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14551 u32 dw_len)
14552{
14553 u32 delta_cycles;
14554 u32 current_egress_rate = ppd->current_egress_rate;
14555 /* rates here are in units of 10^6 bits/sec */
14556
14557 if (desired_egress_rate == -1)
14558 return 0; /* shouldn't happen */
14559
14560 if (desired_egress_rate >= current_egress_rate)
14561		return 0; /* we can't help it go faster, only slower */
14562
14563 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14564 egress_cycles(dw_len * 4, current_egress_rate);
14565
14566 return (u16)delta_cycles;
14567}
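/*
 * In other words (a sketch, not a spec): egress_cycles() converts a byte
 * count at a given rate into fabric clock cycles, so the delta is the
 * extra time, in cycles, that this packet would need at the slower
 * desired rate.  create_pbc() below feeds that delta into the PBC
 * static rate control field to pace egress.
 */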
14568
Mike Marciniszyn77241052015-07-30 15:17:43 -040014569/**
14570 * create_pbc - build a pbc for transmission
14571 * @flags: special case flags or-ed in built pbc
14572 * @srate_mbs: static rate in Mbit/s
14573 * @vl: vl
14574 * @dw_len: dword length (header words + data words + pbc words)
14575 *
14576 * Create a PBC with the given flags, rate, VL, and length.
14577 *
14578 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14579 * for verbs, which does not use this PSM feature. The lone other caller
14580 * is for the diagnostic interface which calls this if the user does not
14581 * supply their own PBC.
14582 */
14583u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14584 u32 dw_len)
14585{
14586 u64 pbc, delay = 0;
14587
14588 if (unlikely(srate_mbs))
14589 delay = delay_cycles(ppd, srate_mbs, dw_len);
14590
14591 pbc = flags
14592 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14593 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14594 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14595 | (dw_len & PBC_LENGTH_DWS_MASK)
14596 << PBC_LENGTH_DWS_SHIFT;
14597
14598 return pbc;
14599}
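/*
 * Hypothetical usage sketch (identifiers are illustrative only).  The
 * dword length passed in includes the 2-dword PBC itself:
 *
 *	u32 dwlen = hdr_dwords + data_dwords + 2;
 *	u64 pbc = create_pbc(ppd, 0, rate_mbs, vl, dwlen);
 *
 * The resulting quad-word is written ahead of the packet data in the
 * PIO send buffer.
 */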
14600
14601#define SBUS_THERMAL 0x4f
14602#define SBUS_THERM_MONITOR_MODE 0x1
14603
14604#define THERM_FAILURE(dev, ret, reason) \
14605 dd_dev_err((dd), \
14606 "Thermal sensor initialization failed: %s (%d)\n", \
14607 (reason), (ret))
14608
14609/*
Jakub Pawlakcde10af2016-05-12 10:23:35 -070014610 * Initialize the thermal sensor.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014611 *
14612 * After initialization, enable polling of the thermal sensor through
14613 * the SBus interface. For this to work, the SBus Master firmware
14614 * has to be loaded, because the HW polling logic uses SBus
14615 * interrupts, which are not supported by the default firmware.
14616 * Otherwise, no data will be returned through
14617 * the ASIC_STS_THERM CSR.
14618 */
14619static int thermal_init(struct hfi1_devdata *dd)
14620{
14621 int ret = 0;
14622
14623 if (dd->icode != ICODE_RTL_SILICON ||
Dean Luicka4536982016-03-05 08:50:11 -080014624 check_chip_resource(dd, CR_THERM_INIT, NULL))
Mike Marciniszyn77241052015-07-30 15:17:43 -040014625 return ret;
14626
Dean Luick576531f2016-03-05 08:50:01 -080014627 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14628 if (ret) {
14629 THERM_FAILURE(dd, ret, "Acquire SBus");
14630 return ret;
14631 }
14632
Mike Marciniszyn77241052015-07-30 15:17:43 -040014633 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014634 /* Disable polling of thermal readings */
14635 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14636 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014637 /* Thermal Sensor Initialization */
14638 /* Step 1: Reset the Thermal SBus Receiver */
14639 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14640 RESET_SBUS_RECEIVER, 0);
14641 if (ret) {
14642 THERM_FAILURE(dd, ret, "Bus Reset");
14643 goto done;
14644 }
14645 /* Step 2: Set Reset bit in Thermal block */
14646 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14647 WRITE_SBUS_RECEIVER, 0x1);
14648 if (ret) {
14649 THERM_FAILURE(dd, ret, "Therm Block Reset");
14650 goto done;
14651 }
14652 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14653 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14654 WRITE_SBUS_RECEIVER, 0x32);
14655 if (ret) {
14656 THERM_FAILURE(dd, ret, "Write Clock Div");
14657 goto done;
14658 }
14659 /* Step 4: Select temperature mode */
14660 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14661 WRITE_SBUS_RECEIVER,
14662 SBUS_THERM_MONITOR_MODE);
14663 if (ret) {
14664 THERM_FAILURE(dd, ret, "Write Mode Sel");
14665 goto done;
14666 }
14667 /* Step 5: De-assert block reset and start conversion */
14668 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14669 WRITE_SBUS_RECEIVER, 0x2);
14670 if (ret) {
14671 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14672 goto done;
14673 }
14674 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14675 msleep(22);
14676
14677 /* Enable polling of thermal readings */
14678 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
Dean Luicka4536982016-03-05 08:50:11 -080014679
14680 /* Set initialized flag */
14681 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14682 if (ret)
14683 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14684
Mike Marciniszyn77241052015-07-30 15:17:43 -040014685done:
Dean Luick576531f2016-03-05 08:50:01 -080014686 release_chip_resource(dd, CR_SBUS);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014687 return ret;
14688}
14689
14690static void handle_temp_err(struct hfi1_devdata *dd)
14691{
14692 struct hfi1_pportdata *ppd = &dd->pport[0];
14693 /*
14694 * Thermal Critical Interrupt
14695 * Put the device into forced freeze mode, take link down to
14696 * offline, and put DC into reset.
14697 */
14698 dd_dev_emerg(dd,
14699 "Critical temperature reached! Forcing device into freeze mode!\n");
14700 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080014701 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014702 /*
14703 * Shut DC down as much and as quickly as possible.
14704 *
14705 * Step 1: Take the link down to OFFLINE. This will cause the
14706 * 8051 to put the Serdes in reset. However, we don't want to
14707 * go through the entire link state machine since we want to
14708 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14709 * but rather an attempt to save the chip.
14710 * Code below is almost the same as quiet_serdes() but avoids
14711 * all the extra work and the sleeps.
14712 */
14713 ppd->driver_link_ready = 0;
14714 ppd->link_enabled = 0;
Harish Chegondibf640092016-03-05 08:49:29 -080014715 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14716 PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014717 /*
14718 * Step 2: Shutdown LCB and 8051
14719 * After shutdown, do not restore DC_CFG_RESET value.
14720 */
14721 dc_shutdown(dd);
14722}