1/*
2 * Copyright(c) 2015, 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48/*
49 * This file contains all of the code that is specific to the HFI chip
50 */
51
52#include <linux/pci.h>
53#include <linux/delay.h>
54#include <linux/interrupt.h>
55#include <linux/module.h>
56
57#include "hfi.h"
58#include "trace.h"
59#include "mad.h"
60#include "pio.h"
61#include "sdma.h"
62#include "eprom.h"
63#include "efivar.h"
64#include "platform.h"
65#include "aspm.h"
66
67#define NUM_IB_PORTS 1
68
69uint kdeth_qp;
70module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
71MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
72
73uint num_vls = HFI1_MAX_VLS_SUPPORTED;
74module_param(num_vls, uint, S_IRUGO);
75MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
76
77/*
78 * Default time to aggregate two 10K packets from the idle state
79 * (timer not running). The timer starts at the end of the first packet,
80 * so only the time for one 10K packet and header plus a bit extra is needed.
81 * 10 * 1024 + 64 header bytes = 10304 bytes
82 * 10304 bytes / 12.5 GB/s = 824.32 ns
83 */
84uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
85module_param(rcv_intr_timeout, uint, S_IRUGO);
86MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
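/*
 * Editor's note -- a worked restatement of the derivation above, useful if
 * the default ever needs to be recomputed for a different packet size:
 *
 *   timeout_ns = (payload_bytes + 64 header bytes) / 12.5 bytes-per-ns
 *              = (10 * 1024 + 64) / 12.5 = 824.32 ns
 *
 * rounded down to 824 ns, plus the 16 ns coalescing margin noted above.
 */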
87
88uint rcv_intr_count = 16; /* same as qib */
89module_param(rcv_intr_count, uint, S_IRUGO);
90MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
91
92ushort link_crc_mask = SUPPORTED_CRCS;
93module_param(link_crc_mask, ushort, S_IRUGO);
94MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
95
96uint loopback;
97module_param_named(loopback, loopback, uint, S_IRUGO);
98MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
99
100/* Other driver tunables */
101uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/
102static ushort crc_14b_sideband = 1;
103static uint use_flr = 1;
104uint quick_linkup; /* skip LNI */
105
106struct flag_table {
107 u64 flag; /* the flag */
108 char *str; /* description string */
109 u16 extra; /* extra information */
110 u16 unused0;
111 u32 unused1;
112};
113
114/* str must be a string constant */
115#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
116#define FLAG_ENTRY0(str, flag) {flag, str, 0}
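/*
 * Editor's note -- illustrative expansion: because struct flag_table is laid
 * out as { flag, str, extra }, an entry such as
 *   FLAG_ENTRY("PioWriteBadCtxt", SEC_WRITE_DROPPED,
 *              SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK)
 * becomes { <smask>, "PioWriteBadCtxt", SEC_WRITE_DROPPED }, and
 * FLAG_ENTRY0() is the same with extra == 0.
 */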
117
118/* Send Error Consequences */
119#define SEC_WRITE_DROPPED 0x1
120#define SEC_PACKET_DROPPED 0x2
121#define SEC_SC_HALTED 0x4 /* per-context only */
122#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
123
124#define MIN_KERNEL_KCTXTS 2
125#define FIRST_KERNEL_KCTXT 1
126/* sizes for both the QP and RSM map tables */
127#define NUM_MAP_ENTRIES 256
128#define NUM_MAP_REGS 32
129
130/* Bit offset into the GUID which carries HFI id information */
131#define GUID_HFI_INDEX_SHIFT 39
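/*
 * Editor's note -- illustrative sketch only: with the shift above, the
 * per-HFI portion of a GUID could be recovered as something like
 *   hfi_id = (guid >> GUID_HFI_INDEX_SHIFT) & 0x1;
 * The one-bit field width is an assumption made purely for illustration;
 * only the shift value comes from the driver.
 */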
132
133/* extract the emulation revision */
134#define emulator_rev(dd) ((dd)->irev >> 8)
135/* parallel and serial emulation versions are 3 and 4 respectively */
136#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
137#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
138
139/* RSM fields */
140
141/* packet type */
142#define IB_PACKET_TYPE 2ull
143#define QW_SHIFT 6ull
144/* QPN[7..1] */
145#define QPN_WIDTH 7ull
146
147/* LRH.BTH: QW 0, OFFSET 48 - for match */
148#define LRH_BTH_QW 0ull
149#define LRH_BTH_BIT_OFFSET 48ull
150#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
151#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
152#define LRH_BTH_SELECT
153#define LRH_BTH_MASK 3ull
154#define LRH_BTH_VALUE 2ull
155
156/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
157#define LRH_SC_QW 0ull
158#define LRH_SC_BIT_OFFSET 56ull
159#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
160#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
161#define LRH_SC_MASK 128ull
162#define LRH_SC_VALUE 0ull
163
164/* SC[n..0] QW 0, OFFSET 60 - for select */
165#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
166
167/* QPN[m+n:1] QW 1, OFFSET 1 */
168#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
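/*
 * Editor's note -- how the offsets above pack together: a match/select
 * offset is (quad-word index << QW_SHIFT) | bit offset within that QW, so
 *   LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48
 *   QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65
 */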
169
170/* defines to build power on SC2VL table */
171#define SC2VL_VAL( \
172 num, \
173 sc0, sc0val, \
174 sc1, sc1val, \
175 sc2, sc2val, \
176 sc3, sc3val, \
177 sc4, sc4val, \
178 sc5, sc5val, \
179 sc6, sc6val, \
180 sc7, sc7val) \
181( \
182 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
183 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
184 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
185 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
186 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
187 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
188 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
189 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
190)
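/*
 * Editor's note -- hypothetical usage sketch (the register name below is an
 * assumption for illustration, not taken from this file):
 *   write_csr(dd, SEND_SC2VLT0,
 *             SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7));
 * would build a single 64-bit value mapping SC0..SC7 to VL0..VL7 in table 0.
 */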
191
192#define DC_SC_VL_VAL( \
193 range, \
194 e0, e0val, \
195 e1, e1val, \
196 e2, e2val, \
197 e3, e3val, \
198 e4, e4val, \
199 e5, e5val, \
200 e6, e6val, \
201 e7, e7val, \
202 e8, e8val, \
203 e9, e9val, \
204 e10, e10val, \
205 e11, e11val, \
206 e12, e12val, \
207 e13, e13val, \
208 e14, e14val, \
209 e15, e15val) \
210( \
211 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
212 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
213 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
214 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
215 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
216 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
217 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
218 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
219 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
220 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
221 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
222 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
223 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
224 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
225 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
226 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
227)
228
229/* all CceStatus sub-block freeze bits */
230#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
231 | CCE_STATUS_RXE_FROZE_SMASK \
232 | CCE_STATUS_TXE_FROZE_SMASK \
233 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
234/* all CceStatus sub-block TXE pause bits */
235#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
236 | CCE_STATUS_TXE_PAUSED_SMASK \
237 | CCE_STATUS_SDMA_PAUSED_SMASK)
238/* all CceStatus sub-block RXE pause bits */
239#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
240
241/*
242 * CCE Error flags.
243 */
244static struct flag_table cce_err_status_flags[] = {
245/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
246 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
247/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
248 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
249/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
250 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
251/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
252 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
253/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
254 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
255/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
256 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
257/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
258 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
259/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
260 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
261/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
262 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
263/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
264 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
265/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
266 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
267/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
268 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
269/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
270 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
271/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
272 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
273/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
274 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
275/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
276 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
277/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
278 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
279/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
280 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
281/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
282 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
283/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
284 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
285/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
286 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
287/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
288 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
289/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
290 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
291/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
292 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
293/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
294 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
295/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
296 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
297/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
298 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
299/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
300 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
301/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
302 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
303/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
304 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
305/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
306 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
307/*31*/ FLAG_ENTRY0("LATriggered",
308 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
309/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
310 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
311/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
312 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
313/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
314 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
315/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
316 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
317/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
318 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
319/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
320 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
321/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
322 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
323/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
324 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
325/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
326 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
327/*41-63 reserved*/
328};
329
330/*
331 * Misc Error flags
332 */
333#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
334static struct flag_table misc_err_status_flags[] = {
335/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
336/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
337/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
338/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
339/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
340/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
341/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
342/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
343/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
344/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
345/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
346/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
347/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
348};
349
350/*
351 * TXE PIO Error flags and consequences
352 */
353static struct flag_table pio_err_status_flags[] = {
354/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
355 SEC_WRITE_DROPPED,
356 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
357/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
358 SEC_SPC_FREEZE,
359 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
360/* 2*/ FLAG_ENTRY("PioCsrParity",
361 SEC_SPC_FREEZE,
362 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
363/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
364 SEC_SPC_FREEZE,
365 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
366/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
367 SEC_SPC_FREEZE,
368 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
369/* 5*/ FLAG_ENTRY("PioPccFifoParity",
370 SEC_SPC_FREEZE,
371 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
372/* 6*/ FLAG_ENTRY("PioPecFifoParity",
373 SEC_SPC_FREEZE,
374 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
375/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
376 SEC_SPC_FREEZE,
377 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
378/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
379 SEC_SPC_FREEZE,
380 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
381/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
382 SEC_SPC_FREEZE,
383 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
384/*10*/ FLAG_ENTRY("PioSmPktResetParity",
385 SEC_SPC_FREEZE,
386 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
387/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
388 SEC_SPC_FREEZE,
389 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
390/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
391 SEC_SPC_FREEZE,
392 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
393/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
394 0,
395 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
396/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
397 0,
398 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
399/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
400 SEC_SPC_FREEZE,
401 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
402/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
403 SEC_SPC_FREEZE,
404 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
405/*17*/ FLAG_ENTRY("PioInitSmIn",
406 0,
407 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
408/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
409 SEC_SPC_FREEZE,
410 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
411/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
412 SEC_SPC_FREEZE,
413 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
414/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
415 0,
416 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
417/*21*/ FLAG_ENTRY("PioWriteDataParity",
418 SEC_SPC_FREEZE,
419 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
420/*22*/ FLAG_ENTRY("PioStateMachine",
421 SEC_SPC_FREEZE,
422 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
423/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
424 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
425 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
426/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
427 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
428 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
429/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
430 SEC_SPC_FREEZE,
431 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
432/*26*/ FLAG_ENTRY("PioVlfSopParity",
433 SEC_SPC_FREEZE,
434 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
435/*27*/ FLAG_ENTRY("PioVlFifoParity",
436 SEC_SPC_FREEZE,
437 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
438/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
439 SEC_SPC_FREEZE,
440 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
441/*29*/ FLAG_ENTRY("PioPpmcSopLen",
442 SEC_SPC_FREEZE,
443 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
444/*30-31 reserved*/
445/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
446 SEC_SPC_FREEZE,
447 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
448/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
449 SEC_SPC_FREEZE,
450 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
451/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
452 SEC_SPC_FREEZE,
453 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
454/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
455 SEC_SPC_FREEZE,
456 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
457/*36-63 reserved*/
458};
459
460/* TXE PIO errors that cause an SPC freeze */
461#define ALL_PIO_FREEZE_ERR \
462 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
463 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
490 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
491
492/*
493 * TXE SDMA Error flags
494 */
495static struct flag_table sdma_err_status_flags[] = {
496/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
497 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
498/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
499 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
500/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
501 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
502/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
503 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
504/*04-63 reserved*/
505};
506
507/* TXE SDMA errors that cause an SPC freeze */
508#define ALL_SDMA_FREEZE_ERR \
509 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
510 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
511 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
512
513/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
514#define PORT_DISCARD_EGRESS_ERRS \
515 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
516 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
517 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
518
519/*
520 * TXE Egress Error flags
521 */
522#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
523static struct flag_table egress_err_status_flags[] = {
524/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
525/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
526/* 2 reserved */
527/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
528 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
529/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
530/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
531/* 6 reserved */
532/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
533 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
534/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
535 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
536/* 9-10 reserved */
537/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
538 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
539/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
540/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
541/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
542/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
543/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
544 SEES(TX_SDMA0_DISALLOWED_PACKET)),
545/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
546 SEES(TX_SDMA1_DISALLOWED_PACKET)),
547/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
548 SEES(TX_SDMA2_DISALLOWED_PACKET)),
549/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
550 SEES(TX_SDMA3_DISALLOWED_PACKET)),
551/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
552 SEES(TX_SDMA4_DISALLOWED_PACKET)),
553/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
554 SEES(TX_SDMA5_DISALLOWED_PACKET)),
555/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
556 SEES(TX_SDMA6_DISALLOWED_PACKET)),
557/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
558 SEES(TX_SDMA7_DISALLOWED_PACKET)),
559/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
560 SEES(TX_SDMA8_DISALLOWED_PACKET)),
561/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
562 SEES(TX_SDMA9_DISALLOWED_PACKET)),
563/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
564 SEES(TX_SDMA10_DISALLOWED_PACKET)),
565/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
566 SEES(TX_SDMA11_DISALLOWED_PACKET)),
567/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
568 SEES(TX_SDMA12_DISALLOWED_PACKET)),
569/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
570 SEES(TX_SDMA13_DISALLOWED_PACKET)),
571/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
572 SEES(TX_SDMA14_DISALLOWED_PACKET)),
573/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
574 SEES(TX_SDMA15_DISALLOWED_PACKET)),
575/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
576 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
577/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
578 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
579/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
580 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
581/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
582 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
583/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
584 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
585/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
586 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
587/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
588 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
589/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
590 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
591/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
592 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
593/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
594/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
595/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
596/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
597/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
598/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
599/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
600/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
601/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
602/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
603/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
604/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
605/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
606/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
607/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
608/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
609/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
610/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
611/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
612/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
613/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
614/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
615 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
616/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
617 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
618};
619
620/*
621 * TXE Egress Error Info flags
622 */
623#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
624static struct flag_table egress_err_info_flags[] = {
625/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
626/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
627/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
628/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
629/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
630/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
631/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
632/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
633/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
634/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
635/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
636/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
637/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
638/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
639/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
640/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
641/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
642/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
643/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
644/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
645/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
646/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
647};
648
649/* TXE Egress errors that cause an SPC freeze */
650#define ALL_TXE_EGRESS_FREEZE_ERR \
651 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
652 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
653 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
654 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
655 | SEES(TX_LAUNCH_CSR_PARITY) \
656 | SEES(TX_SBRD_CTL_CSR_PARITY) \
657 | SEES(TX_CONFIG_PARITY) \
658 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
659 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
660 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
661 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
662 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
663 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
664 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
665 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
666 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
667 | SEES(TX_CREDIT_RETURN_PARITY))
668
669/*
670 * TXE Send error flags
671 */
672#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
673static struct flag_table send_err_status_flags[] = {
674/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
675/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
676/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
677};
678
679/*
680 * TXE Send Context Error flags and consequences
681 */
682static struct flag_table sc_err_status_flags[] = {
683/* 0*/ FLAG_ENTRY("InconsistentSop",
684 SEC_PACKET_DROPPED | SEC_SC_HALTED,
685 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
686/* 1*/ FLAG_ENTRY("DisallowedPacket",
687 SEC_PACKET_DROPPED | SEC_SC_HALTED,
688 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
689/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
690 SEC_WRITE_DROPPED | SEC_SC_HALTED,
691 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
692/* 3*/ FLAG_ENTRY("WriteOverflow",
693 SEC_WRITE_DROPPED | SEC_SC_HALTED,
694 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
695/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
696 SEC_WRITE_DROPPED | SEC_SC_HALTED,
697 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
698/* 5-63 reserved*/
699};
700
701/*
702 * RXE Receive Error flags
703 */
704#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
705static struct flag_table rxe_err_status_flags[] = {
706/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
707/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
708/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
709/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
710/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
711/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
712/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
713/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
714/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
715/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
716/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
717/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
718/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
719/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
720/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
721/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
722/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
723 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
724/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
725/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
726/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
727 RXES(RBUF_BLOCK_LIST_READ_UNC)),
728/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
729 RXES(RBUF_BLOCK_LIST_READ_COR)),
730/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
731 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
732/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
733 RXES(RBUF_CSR_QENT_CNT_PARITY)),
734/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
735 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
736/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
737 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
738/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
739/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
740/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
741 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
742/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
743/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
744/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
745/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
746/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
747/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
748/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
749/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
750 RXES(RBUF_FL_INITDONE_PARITY)),
751/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
752 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
753/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
754/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
755/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
756/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
757 RXES(LOOKUP_DES_PART1_UNC_COR)),
758/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
759 RXES(LOOKUP_DES_PART2_PARITY)),
760/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
761/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
762/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
763/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
764/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
765/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
766/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
767/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
768/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
769/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
770/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
771/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
772/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
773/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
774/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
775/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
776/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
777/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
778/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
779/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
780/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
781/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
782};
783
784/* RXE errors that will trigger an SPC freeze */
785#define ALL_RXE_FREEZE_ERR \
786 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
787 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
824 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
825 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
826 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
827 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
828 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
829 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
830
831#define RXE_FREEZE_ABORT_MASK \
832 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
833 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
834 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
835
836/*
837 * DCC Error Flags
838 */
839#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
840static struct flag_table dcc_err_flags[] = {
841 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
842 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
843 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
844 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
845 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
846 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
847 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
848 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
849 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
850 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
851 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
852 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
853 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
854 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
855 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
856 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
857 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
858 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
859 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
860 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
861 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
862 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
863 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
864 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
865 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
866 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
867 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
868 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
869 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
870 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
871 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
872 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
873 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
874 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
875 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
876 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
877 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
878 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
879 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
880 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
881 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
882 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
883 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
884 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
885 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
886 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
887};
888
889/*
890 * LCB error flags
891 */
892#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
893static struct flag_table lcb_err_flags[] = {
894/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
895/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
896/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
897/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
898 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
899/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
900/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
901/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
902/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
903/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
904/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
905/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
906/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
907/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
908/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
909 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
910/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
911/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
912/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
913/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
914/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
915/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
916 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
917/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
918/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
919/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
920/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
921/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
922/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
923/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
924 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
925/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
926/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
927 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
928/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
929 LCBE(REDUNDANT_FLIT_PARITY_ERR))
930};
931
932/*
933 * DC8051 Error Flags
934 */
935#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
936static struct flag_table dc8051_err_flags[] = {
937 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
938 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
939 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
940 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
941 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
942 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
943 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
944 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
945 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
946 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
947 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
948};
949
950/*
951 * DC8051 Information Error flags
952 *
953 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
954 */
955static struct flag_table dc8051_info_err_flags[] = {
956 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
957 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
958 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
959 FLAG_ENTRY0("Serdes internal loopback failure",
960 FAILED_SERDES_INTERNAL_LOOPBACK),
961 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
962 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
963 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
964 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
965 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
966 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
967 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
968 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
969 FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT)
970};
971
972/*
973 * DC8051 Information Host Information flags
974 *
975 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
976 */
977static struct flag_table dc8051_info_host_msg_flags[] = {
978 FLAG_ENTRY0("Host request done", 0x0001),
979 FLAG_ENTRY0("BC SMA message", 0x0002),
980 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
981 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
982 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
983 FLAG_ENTRY0("External device config request", 0x0020),
984 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
985 FLAG_ENTRY0("LinkUp achieved", 0x0080),
986 FLAG_ENTRY0("Link going down", 0x0100),
987};
988
989static u32 encoded_size(u32 size);
990static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
991static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
992static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
993 u8 *continuous);
994static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
995 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
996static void read_vc_remote_link_width(struct hfi1_devdata *dd,
997 u8 *remote_tx_rate, u16 *link_widths);
998static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
999 u8 *flag_bits, u16 *link_widths);
1000static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1001 u8 *device_rev);
1002static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1003static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1004static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1005 u8 *tx_polarity_inversion,
1006 u8 *rx_polarity_inversion, u8 *max_rate);
1007static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1008 unsigned int context, u64 err_status);
1009static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1010static void handle_dcc_err(struct hfi1_devdata *dd,
1011 unsigned int context, u64 err_status);
1012static void handle_lcb_err(struct hfi1_devdata *dd,
1013 unsigned int context, u64 err_status);
1014static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1017static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1018static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1020static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1021static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022static void set_partition_keys(struct hfi1_pportdata *);
1023static const char *link_state_name(u32 state);
1024static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1025 u32 state);
1026static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1027 u64 *out_data);
1028static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1029static int thermal_init(struct hfi1_devdata *dd);
1030
1031static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1032 int msecs);
1033static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1034static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1035static void handle_temp_err(struct hfi1_devdata *);
1036static void dc_shutdown(struct hfi1_devdata *);
1037static void dc_start(struct hfi1_devdata *);
1038static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1039 unsigned int *np);
1040
1041/*
1042 * Error interrupt table entry. This is used as input to the interrupt
1043 * "clear down" routine used for all second tier error interrupt register.
1044 * Second tier interrupt registers have a single bit representing them
1045 * in the top-level CceIntStatus.
1046 */
1047struct err_reg_info {
1048 u32 status; /* status CSR offset */
1049 u32 clear; /* clear CSR offset */
1050 u32 mask; /* mask CSR offset */
1051 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1052 const char *desc;
1053};
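/*
 * Editor's note -- a minimal sketch (not the driver's actual clear-down
 * routine) of how one err_reg_info entry could be consumed: read the status
 * CSR, write the observed bits back to the clear CSR, then hand the bits to
 * the entry's handler. Assumes only read_csr()/write_csr() as defined later
 * in this file.
 *
 *   static void example_clear_down(struct hfi1_devdata *dd,
 *                                  const struct err_reg_info *eri)
 *   {
 *           u64 reg = read_csr(dd, eri->status);
 *
 *           write_csr(dd, eri->clear, reg);
 *           if (eri->handler)
 *                   eri->handler(dd, 0, reg);
 *   }
 */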
1054
1055#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1056#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1057#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1058
1059/*
1060 * Helpers for building HFI and DC error interrupt table entries. Different
1061 * helpers are needed because of inconsistent register names.
1062 */
1063#define EE(reg, handler, desc) \
1064 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1065 handler, desc }
1066#define DC_EE1(reg, handler, desc) \
1067 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1068#define DC_EE2(reg, handler, desc) \
1069 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
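/*
 * Editor's note -- illustrative expansion: EE(CCE_ERR, handle_cce_err,
 * "CceErr") becomes { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 * handle_cce_err, "CceErr" }, while DC_EE1()/DC_EE2() cover DC blocks whose
 * CSRs follow the _FLG/_FLG_CLR/_FLG_EN and _FLG/_CLR/_EN naming patterns
 * instead.
 */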
1070
1071/*
1072 * Table of the "misc" grouping of error interrupts. Each entry refers to
1073 * another register containing more information.
1074 */
1075static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1076/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1077/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1078/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1079/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1080/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1081/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1082/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1083/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1084 /* the rest are reserved */
1085};
1086
1087/*
1088 * Index into the Various section of the interrupt sources
1089 * corresponding to the Critical Temperature interrupt.
1090 */
1091#define TCRIT_INT_SOURCE 4
1092
1093/*
1094 * SDMA error interrupt entry - refers to another register containing more
1095 * information.
1096 */
1097static const struct err_reg_info sdma_eng_err =
1098 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1099
1100static const struct err_reg_info various_err[NUM_VARIOUS] = {
1101/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1102/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1103/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1104/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1105/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1106 /* rest are reserved */
1107};
1108
1109/*
1110 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1111 * register can not be derived from the MTU value because 10K is not
1112 * a power of 2. Therefore, we need a constant. Everything else can
1113 * be calculated.
1114 */
1115#define DCC_CFG_PORT_MTU_CAP_10240 7
1116
1117/*
1118 * Table of the DC grouping of error interrupts. Each entry refers to
1119 * another register containing more information.
1120 */
1121static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1122/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1123/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1124/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1125/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1126 /* the rest are reserved */
1127};
1128
1129struct cntr_entry {
1130 /*
1131 * counter name
1132 */
1133 char *name;
1134
1135 /*
1136 * csr to read for name (if applicable)
1137 */
1138 u64 csr;
1139
1140 /*
1141 * offset into dd or ppd to store the counter's value
1142 */
1143 int offset;
1144
1145 /*
1146 * flags
1147 */
1148 u8 flags;
1149
1150 /*
1151 * accessor for stat element, context either dd or ppd
1152 */
1153 u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1154 int mode, u64 data);
1155};
1156
1157#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1158#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1159
1160#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1161{ \
1162 name, \
1163 csr, \
1164 offset, \
1165 flags, \
1166 accessor \
1167}
1168
1169/* 32bit RXE */
1170#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1171CNTR_ELEM(#name, \
1172 (counter * 8 + RCV_COUNTER_ARRAY32), \
1173 0, flags | CNTR_32BIT, \
1174 port_access_u32_csr)
1175
1176#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1177CNTR_ELEM(#name, \
1178 (counter * 8 + RCV_COUNTER_ARRAY32), \
1179 0, flags | CNTR_32BIT, \
1180 dev_access_u32_csr)
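/*
 * Editor's note -- illustrative expansion (the counter name and index below
 * are made up): RXE32_DEV_CNTR_ELEM(RxExample, 5, CNTR_NORMAL) becomes
 *   { "RxExample", 5 * 8 + RCV_COUNTER_ARRAY32, 0,
 *     CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
 * i.e. each counter occupies an 8-byte slot starting at the
 * RcvCounterArray32 base.
 */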
1181
1182/* 64bit RXE */
1183#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1184CNTR_ELEM(#name, \
1185 (counter * 8 + RCV_COUNTER_ARRAY64), \
1186 0, flags, \
1187 port_access_u64_csr)
1188
1189#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1190CNTR_ELEM(#name, \
1191 (counter * 8 + RCV_COUNTER_ARRAY64), \
1192 0, flags, \
1193 dev_access_u64_csr)
1194
1195#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1196#define OVR_ELM(ctx) \
1197CNTR_ELEM("RcvHdrOvr" #ctx, \
1198 (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1199 0, CNTR_NORMAL, port_access_u64_csr)
1200
1201/* 32bit TXE */
1202#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1203CNTR_ELEM(#name, \
1204 (counter * 8 + SEND_COUNTER_ARRAY32), \
1205 0, flags | CNTR_32BIT, \
1206 port_access_u32_csr)
1207
1208/* 64bit TXE */
1209#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1210CNTR_ELEM(#name, \
1211 (counter * 8 + SEND_COUNTER_ARRAY64), \
1212 0, flags, \
1213 port_access_u64_csr)
1214
1215# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1216CNTR_ELEM(#name,\
1217 counter * 8 + SEND_COUNTER_ARRAY64, \
1218 0, \
1219 flags, \
1220 dev_access_u64_csr)
1221
1222/* CCE */
1223#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1224CNTR_ELEM(#name, \
1225 (counter * 8 + CCE_COUNTER_ARRAY32), \
1226 0, flags | CNTR_32BIT, \
1227 dev_access_u32_csr)
1228
1229#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1230CNTR_ELEM(#name, \
1231 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1232 0, flags | CNTR_32BIT, \
1233 dev_access_u32_csr)
1234
1235/* DC */
1236#define DC_PERF_CNTR(name, counter, flags) \
1237CNTR_ELEM(#name, \
1238 counter, \
1239 0, \
1240 flags, \
1241 dev_access_u64_csr)
1242
1243#define DC_PERF_CNTR_LCB(name, counter, flags) \
1244CNTR_ELEM(#name, \
1245 counter, \
1246 0, \
1247 flags, \
1248 dc_access_lcb_cntr)
1249
1250/* ibp counters */
1251#define SW_IBP_CNTR(name, cntr) \
1252CNTR_ELEM(#name, \
1253 0, \
1254 0, \
1255 CNTR_SYNTH, \
1256 access_ibp_##cntr)
1257
1258u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1259{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001260 if (dd->flags & HFI1_PRESENT) {
Bhaktipriya Shridhar6d210ee2016-02-25 17:22:11 +05301261 return readq((void __iomem *)dd->kregbase + offset);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001262 }
1263 return -1;
1264}
1265
1266void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1267{
1268 if (dd->flags & HFI1_PRESENT)
1269 writeq(value, (void __iomem *)dd->kregbase + offset);
1270}
1271
1272void __iomem *get_csr_addr(
1273 struct hfi1_devdata *dd,
1274 u32 offset)
1275{
1276 return (void __iomem *)dd->kregbase + offset;
1277}
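/*
 * Illustrative sketch (not part of the driver): read_csr()/write_csr() are
 * the low-level CSR accessors used by the counter code below.  A
 * hypothetical read-modify-write of a CSR at offset "off" with a
 * hypothetical bit mask "mask" would look roughly like:
 *
 *	u64 reg = read_csr(dd, off);
 *
 *	write_csr(dd, off, reg | mask);
 *
 * When the device is not present (HFI1_PRESENT clear), read_csr() returns
 * all ones and write_csr() silently does nothing.
 */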
1278
1279static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1280 int mode, u64 value)
1281{
1282 u64 ret;
1283
Mike Marciniszyn77241052015-07-30 15:17:43 -04001284 if (mode == CNTR_MODE_R) {
1285 ret = read_csr(dd, csr);
1286 } else if (mode == CNTR_MODE_W) {
1287 write_csr(dd, csr, value);
1288 ret = value;
1289 } else {
1290 dd_dev_err(dd, "Invalid cntr register access mode");
1291 return 0;
1292 }
1293
1294 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1295 return ret;
1296}
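/*
 * Illustrative sketch (not part of the driver): read_write_csr() is the
 * common mode dispatcher used by the dev/port accessors below.  Reading a
 * counter CSR, and writing it back to zero, would look roughly like:
 *
 *	u64 val = read_write_csr(dd, csr, CNTR_MODE_R, 0);
 *
 *	read_write_csr(dd, csr, CNTR_MODE_W, 0);
 *
 * Any other mode is rejected with a device error message and returns 0.
 */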
1297
1298/* Dev Access */
1299static u64 dev_access_u32_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001300 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001301{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301302 struct hfi1_devdata *dd = context;
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001303 u64 csr = entry->csr;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001304
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001305 if (entry->flags & CNTR_SDMA) {
1306 if (vl == CNTR_INVALID_VL)
1307 return 0;
1308 csr += 0x100 * vl;
1309 } else {
1310 if (vl != CNTR_INVALID_VL)
1311 return 0;
1312 }
1313 return read_write_csr(dd, csr, mode, data);
1314}
1315
1316static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1317 void *context, int idx, int mode, u64 data)
1318{
1319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1320
1321 if (dd->per_sdma && idx < dd->num_sdma)
1322 return dd->per_sdma[idx].err_cnt;
1323 return 0;
1324}
1325
1326static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1327 void *context, int idx, int mode, u64 data)
1328{
1329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1330
1331 if (dd->per_sdma && idx < dd->num_sdma)
1332 return dd->per_sdma[idx].sdma_int_cnt;
1333 return 0;
1334}
1335
1336static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1337 void *context, int idx, int mode, u64 data)
1338{
1339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1340
1341 if (dd->per_sdma && idx < dd->num_sdma)
1342 return dd->per_sdma[idx].idle_int_cnt;
1343 return 0;
1344}
1345
1346static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1347 void *context, int idx, int mode,
1348 u64 data)
1349{
1350 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1351
1352 if (dd->per_sdma && idx < dd->num_sdma)
1353 return dd->per_sdma[idx].progress_int_cnt;
1354 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001355}
1356
1357static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001358 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001359{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301360 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001361
1362 u64 val = 0;
1363 u64 csr = entry->csr;
1364
1365 if (entry->flags & CNTR_VL) {
1366 if (vl == CNTR_INVALID_VL)
1367 return 0;
1368 csr += 8 * vl;
1369 } else {
1370 if (vl != CNTR_INVALID_VL)
1371 return 0;
1372 }
1373
1374 val = read_write_csr(dd, csr, mode, data);
1375 return val;
1376}
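/*
 * Illustrative note (not part of the driver): the per-VL and per-SDMA
 * counter CSRs are banked, so the accessors above offset the base csr by
 * the requested index before reading.  Roughly:
 *
 *	CNTR_VL   entries: csr += 8 * vl      (64-bit CSRs, one per VL)
 *	CNTR_SDMA entries: csr += 0x100 * vl  (0x100 stride per index, used
 *					       for the SDMA engine counters)
 *
 * Entries without these flags only accept vl == CNTR_INVALID_VL; any other
 * combination simply returns 0.
 */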
1377
1378static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001379 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001380{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301381 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001382 u32 csr = entry->csr;
1383 int ret = 0;
1384
1385 if (vl != CNTR_INVALID_VL)
1386 return 0;
1387 if (mode == CNTR_MODE_R)
1388 ret = read_lcb_csr(dd, csr, &data);
1389 else if (mode == CNTR_MODE_W)
1390 ret = write_lcb_csr(dd, csr, data);
1391
1392 if (ret) {
1393 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1394 return 0;
1395 }
1396
1397 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1398 return data;
1399}
1400
1401/* Port Access */
1402static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001403 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001404{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301405 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001406
1407 if (vl != CNTR_INVALID_VL)
1408 return 0;
1409 return read_write_csr(ppd->dd, entry->csr, mode, data);
1410}
1411
1412static u64 port_access_u64_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001413 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001414{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301415 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001416 u64 val;
1417 u64 csr = entry->csr;
1418
1419 if (entry->flags & CNTR_VL) {
1420 if (vl == CNTR_INVALID_VL)
1421 return 0;
1422 csr += 8 * vl;
1423 } else {
1424 if (vl != CNTR_INVALID_VL)
1425 return 0;
1426 }
1427 val = read_write_csr(ppd->dd, csr, mode, data);
1428 return val;
1429}
1430
1431/* Software defined */
1432static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1433 u64 data)
1434{
1435 u64 ret;
1436
1437 if (mode == CNTR_MODE_R) {
1438 ret = *cntr;
1439 } else if (mode == CNTR_MODE_W) {
1440 *cntr = data;
1441 ret = data;
1442 } else {
1443 dd_dev_err(dd, "Invalid cntr sw access mode");
1444 return 0;
1445 }
1446
1447 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1448
1449 return ret;
1450}
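/*
 * Illustrative sketch (not part of the driver): read_write_sw() gives the
 * purely software-maintained counters (link_downed, link_up, the xmit/rcv
 * constraint errors, ...) the same read/write-mode interface as the CSR
 * backed ones, e.g.:
 *
 *	u64 downs = read_write_sw(ppd->dd, &ppd->link_downed,
 *				  CNTR_MODE_R, 0);
 *
 * so the generic counter code does not need to care where a value lives.
 */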
1451
1452static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001453 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001454{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301455 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001456
1457 if (vl != CNTR_INVALID_VL)
1458 return 0;
1459 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1460}
1461
1462static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001463 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001464{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301465 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001466
1467 if (vl != CNTR_INVALID_VL)
1468 return 0;
1469 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1470}
1471
Dean Luick6d014532015-12-01 15:38:23 -05001472static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1473 void *context, int vl, int mode,
1474 u64 data)
1475{
1476 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1477
1478 if (vl != CNTR_INVALID_VL)
1479 return 0;
1480 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1481}
1482
Mike Marciniszyn77241052015-07-30 15:17:43 -04001483static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001484 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001485{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001486 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1487 u64 zero = 0;
1488 u64 *counter;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001489
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001490 if (vl == CNTR_INVALID_VL)
1491 counter = &ppd->port_xmit_discards;
1492 else if (vl >= 0 && vl < C_VL_COUNT)
1493 counter = &ppd->port_xmit_discards_vl[vl];
1494 else
1495 counter = &zero;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001496
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001497 return read_write_sw(ppd->dd, counter, mode, data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001498}
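/*
 * Illustrative note (not part of the driver): xmit discards are tracked
 * both as a port aggregate and per VL, selected by the vl argument:
 *
 *	CNTR_INVALID_VL      -> ppd->port_xmit_discards
 *	0 <= vl < C_VL_COUNT -> ppd->port_xmit_discards_vl[vl]
 *	anything else        -> a throwaway zero, so the caller just sees 0
 */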
1499
1500static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001501 void *context, int vl, int mode,
1502 u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001503{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301504 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001505
1506 if (vl != CNTR_INVALID_VL)
1507 return 0;
1508
1509 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1510 mode, data);
1511}
1512
1513static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001514 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001515{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301516 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001517
1518 if (vl != CNTR_INVALID_VL)
1519 return 0;
1520
1521 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1522 mode, data);
1523}
1524
1525u64 get_all_cpu_total(u64 __percpu *cntr)
1526{
1527 int cpu;
1528 u64 counter = 0;
1529
1530 for_each_possible_cpu(cpu)
1531 counter += *per_cpu_ptr(cntr, cpu);
1532 return counter;
1533}
1534
1535static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1536 u64 __percpu *cntr,
1537 int vl, int mode, u64 data)
1538{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001539 u64 ret = 0;
1540
1541 if (vl != CNTR_INVALID_VL)
1542 return 0;
1543
1544 if (mode == CNTR_MODE_R) {
1545 ret = get_all_cpu_total(cntr) - *z_val;
1546 } else if (mode == CNTR_MODE_W) {
1547 /* A write can only zero the counter */
1548 if (data == 0)
1549 *z_val = get_all_cpu_total(cntr);
1550 else
1551 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1552 } else {
1553 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1554 return 0;
1555 }
1556
1557 return ret;
1558}
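/*
 * Illustrative sketch (not part of the driver): the per-CPU counters are
 * never actually reset.  Each has a "zero value" snapshot (z_val); a read
 * reports the per-CPU sum minus that snapshot, and a write of 0 simply
 * moves the snapshot forward:
 *
 *	read:     value = get_all_cpu_total(cntr) - *z_val;
 *	write 0:  *z_val = get_all_cpu_total(cntr);
 *
 * Writing any non-zero value is rejected, since per-CPU counters can only
 * be zeroed.
 */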
1559
1560static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1561 void *context, int vl, int mode, u64 data)
1562{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301563 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001564
1565 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1566 mode, data);
1567}
1568
1569static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001570 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001571{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301572 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001573
1574 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1575 mode, data);
1576}
1577
1578static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1579 void *context, int vl, int mode, u64 data)
1580{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301581 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001582
1583 return dd->verbs_dev.n_piowait;
1584}
1585
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001586static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1587 void *context, int vl, int mode, u64 data)
1588{
1589 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1590
1591 return dd->verbs_dev.n_piodrain;
1592}
1593
Mike Marciniszyn77241052015-07-30 15:17:43 -04001594static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1595 void *context, int vl, int mode, u64 data)
1596{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301597 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001598
1599 return dd->verbs_dev.n_txwait;
1600}
1601
1602static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1603 void *context, int vl, int mode, u64 data)
1604{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301605 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001606
1607 return dd->verbs_dev.n_kmem_wait;
1608}
1609
Dean Luickb4219222015-10-26 10:28:35 -04001610static u64 access_sw_send_schedule(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001611 void *context, int vl, int mode, u64 data)
Dean Luickb4219222015-10-26 10:28:35 -04001612{
1613 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1614
Vennila Megavannan89abfc82016-02-03 14:34:07 -08001615 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1616 mode, data);
Dean Luickb4219222015-10-26 10:28:35 -04001617}
1618
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05001619/* Software counters for the error status bits within MISC_ERR_STATUS */
1620static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1621 void *context, int vl, int mode,
1622 u64 data)
1623{
1624 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1625
1626 return dd->misc_err_status_cnt[12];
1627}
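/*
 * Illustrative note (not part of the driver): each of these accessors maps
 * one error-status bit to the software counter kept at the same index, so
 * the PLL-lock-fail accessor above returns misc_err_status_cnt[12], the
 * MBIST-fail accessor returns misc_err_status_cnt[11], and so on down to
 * bit 0.  The CceErrStatus, RcvErrStatus, SendPioErrStatus,
 * SendDmaErrStatus and SendEgressErrStatus groups below follow the same
 * bit-index-to-array-index pattern.
 */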
1628
1629static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1630 void *context, int vl, int mode,
1631 u64 data)
1632{
1633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1634
1635 return dd->misc_err_status_cnt[11];
1636}
1637
1638static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1639 void *context, int vl, int mode,
1640 u64 data)
1641{
1642 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1643
1644 return dd->misc_err_status_cnt[10];
1645}
1646
1647static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1648 void *context, int vl,
1649 int mode, u64 data)
1650{
1651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1652
1653 return dd->misc_err_status_cnt[9];
1654}
1655
1656static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1657 void *context, int vl, int mode,
1658 u64 data)
1659{
1660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1661
1662 return dd->misc_err_status_cnt[8];
1663}
1664
1665static u64 access_misc_efuse_read_bad_addr_err_cnt(
1666 const struct cntr_entry *entry,
1667 void *context, int vl, int mode, u64 data)
1668{
1669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1670
1671 return dd->misc_err_status_cnt[7];
1672}
1673
1674static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1675 void *context, int vl,
1676 int mode, u64 data)
1677{
1678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1679
1680 return dd->misc_err_status_cnt[6];
1681}
1682
1683static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1684 void *context, int vl, int mode,
1685 u64 data)
1686{
1687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1688
1689 return dd->misc_err_status_cnt[5];
1690}
1691
1692static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1693 void *context, int vl, int mode,
1694 u64 data)
1695{
1696 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1697
1698 return dd->misc_err_status_cnt[4];
1699}
1700
1701static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1702 void *context, int vl,
1703 int mode, u64 data)
1704{
1705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1706
1707 return dd->misc_err_status_cnt[3];
1708}
1709
1710static u64 access_misc_csr_write_bad_addr_err_cnt(
1711 const struct cntr_entry *entry,
1712 void *context, int vl, int mode, u64 data)
1713{
1714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1715
1716 return dd->misc_err_status_cnt[2];
1717}
1718
1719static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1720 void *context, int vl,
1721 int mode, u64 data)
1722{
1723 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1724
1725 return dd->misc_err_status_cnt[1];
1726}
1727
1728static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1729 void *context, int vl, int mode,
1730 u64 data)
1731{
1732 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1733
1734 return dd->misc_err_status_cnt[0];
1735}
1736
1737/*
1738 * Software counter for the aggregate of
1739 * individual CceErrStatus counters
1740 */
1741static u64 access_sw_cce_err_status_aggregated_cnt(
1742 const struct cntr_entry *entry,
1743 void *context, int vl, int mode, u64 data)
1744{
1745 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1746
1747 return dd->sw_cce_err_status_aggregate;
1748}
1749
1750/*
1751 * Software counters corresponding to each of the
1752 * error status bits within CceErrStatus
1753 */
1754static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1755 void *context, int vl, int mode,
1756 u64 data)
1757{
1758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1759
1760 return dd->cce_err_status_cnt[40];
1761}
1762
1763static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1764 void *context, int vl, int mode,
1765 u64 data)
1766{
1767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1768
1769 return dd->cce_err_status_cnt[39];
1770}
1771
1772static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1773 void *context, int vl, int mode,
1774 u64 data)
1775{
1776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1777
1778 return dd->cce_err_status_cnt[38];
1779}
1780
1781static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1782 void *context, int vl, int mode,
1783 u64 data)
1784{
1785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1786
1787 return dd->cce_err_status_cnt[37];
1788}
1789
1790static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1791 void *context, int vl, int mode,
1792 u64 data)
1793{
1794 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1795
1796 return dd->cce_err_status_cnt[36];
1797}
1798
1799static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1800 const struct cntr_entry *entry,
1801 void *context, int vl, int mode, u64 data)
1802{
1803 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1804
1805 return dd->cce_err_status_cnt[35];
1806}
1807
1808static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1809 const struct cntr_entry *entry,
1810 void *context, int vl, int mode, u64 data)
1811{
1812 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1813
1814 return dd->cce_err_status_cnt[34];
1815}
1816
1817static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1818 void *context, int vl,
1819 int mode, u64 data)
1820{
1821 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1822
1823 return dd->cce_err_status_cnt[33];
1824}
1825
1826static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1827 void *context, int vl, int mode,
1828 u64 data)
1829{
1830 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1831
1832 return dd->cce_err_status_cnt[32];
1833}
1834
1835static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1836 void *context, int vl, int mode, u64 data)
1837{
1838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1839
1840 return dd->cce_err_status_cnt[31];
1841}
1842
1843static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1844 void *context, int vl, int mode,
1845 u64 data)
1846{
1847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1848
1849 return dd->cce_err_status_cnt[30];
1850}
1851
1852static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1853 void *context, int vl, int mode,
1854 u64 data)
1855{
1856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1857
1858 return dd->cce_err_status_cnt[29];
1859}
1860
1861static u64 access_pcic_transmit_back_parity_err_cnt(
1862 const struct cntr_entry *entry,
1863 void *context, int vl, int mode, u64 data)
1864{
1865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1866
1867 return dd->cce_err_status_cnt[28];
1868}
1869
1870static u64 access_pcic_transmit_front_parity_err_cnt(
1871 const struct cntr_entry *entry,
1872 void *context, int vl, int mode, u64 data)
1873{
1874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1875
1876 return dd->cce_err_status_cnt[27];
1877}
1878
1879static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1880 void *context, int vl, int mode,
1881 u64 data)
1882{
1883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1884
1885 return dd->cce_err_status_cnt[26];
1886}
1887
1888static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1889 void *context, int vl, int mode,
1890 u64 data)
1891{
1892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1893
1894 return dd->cce_err_status_cnt[25];
1895}
1896
1897static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1898 void *context, int vl, int mode,
1899 u64 data)
1900{
1901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1902
1903 return dd->cce_err_status_cnt[24];
1904}
1905
1906static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1907 void *context, int vl, int mode,
1908 u64 data)
1909{
1910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1911
1912 return dd->cce_err_status_cnt[23];
1913}
1914
1915static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1916 void *context, int vl,
1917 int mode, u64 data)
1918{
1919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1920
1921 return dd->cce_err_status_cnt[22];
1922}
1923
1924static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1925 void *context, int vl, int mode,
1926 u64 data)
1927{
1928 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1929
1930 return dd->cce_err_status_cnt[21];
1931}
1932
1933static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1934 const struct cntr_entry *entry,
1935 void *context, int vl, int mode, u64 data)
1936{
1937 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1938
1939 return dd->cce_err_status_cnt[20];
1940}
1941
1942static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1943 void *context, int vl,
1944 int mode, u64 data)
1945{
1946 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1947
1948 return dd->cce_err_status_cnt[19];
1949}
1950
1951static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1952 void *context, int vl, int mode,
1953 u64 data)
1954{
1955 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1956
1957 return dd->cce_err_status_cnt[18];
1958}
1959
1960static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1961 void *context, int vl, int mode,
1962 u64 data)
1963{
1964 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1965
1966 return dd->cce_err_status_cnt[17];
1967}
1968
1969static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1970 void *context, int vl, int mode,
1971 u64 data)
1972{
1973 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1974
1975 return dd->cce_err_status_cnt[16];
1976}
1977
1978static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1979 void *context, int vl, int mode,
1980 u64 data)
1981{
1982 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1983
1984 return dd->cce_err_status_cnt[15];
1985}
1986
1987static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1988 void *context, int vl,
1989 int mode, u64 data)
1990{
1991 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1992
1993 return dd->cce_err_status_cnt[14];
1994}
1995
1996static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1997 void *context, int vl, int mode,
1998 u64 data)
1999{
2000 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2001
2002 return dd->cce_err_status_cnt[13];
2003}
2004
2005static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2006 const struct cntr_entry *entry,
2007 void *context, int vl, int mode, u64 data)
2008{
2009 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2010
2011 return dd->cce_err_status_cnt[12];
2012}
2013
2014static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2015 const struct cntr_entry *entry,
2016 void *context, int vl, int mode, u64 data)
2017{
2018 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2019
2020 return dd->cce_err_status_cnt[11];
2021}
2022
2023static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2024 const struct cntr_entry *entry,
2025 void *context, int vl, int mode, u64 data)
2026{
2027 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2028
2029 return dd->cce_err_status_cnt[10];
2030}
2031
2032static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2033 const struct cntr_entry *entry,
2034 void *context, int vl, int mode, u64 data)
2035{
2036 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2037
2038 return dd->cce_err_status_cnt[9];
2039}
2040
2041static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2042 const struct cntr_entry *entry,
2043 void *context, int vl, int mode, u64 data)
2044{
2045 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2046
2047 return dd->cce_err_status_cnt[8];
2048}
2049
2050static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2051 void *context, int vl,
2052 int mode, u64 data)
2053{
2054 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2055
2056 return dd->cce_err_status_cnt[7];
2057}
2058
2059static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2060 const struct cntr_entry *entry,
2061 void *context, int vl, int mode, u64 data)
2062{
2063 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2064
2065 return dd->cce_err_status_cnt[6];
2066}
2067
2068static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2069 void *context, int vl, int mode,
2070 u64 data)
2071{
2072 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2073
2074 return dd->cce_err_status_cnt[5];
2075}
2076
2077static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2078 void *context, int vl, int mode,
2079 u64 data)
2080{
2081 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2082
2083 return dd->cce_err_status_cnt[4];
2084}
2085
2086static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2087 const struct cntr_entry *entry,
2088 void *context, int vl, int mode, u64 data)
2089{
2090 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2091
2092 return dd->cce_err_status_cnt[3];
2093}
2094
2095static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2096 void *context, int vl,
2097 int mode, u64 data)
2098{
2099 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2100
2101 return dd->cce_err_status_cnt[2];
2102}
2103
2104static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2105 void *context, int vl,
2106 int mode, u64 data)
2107{
2108 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2109
2110 return dd->cce_err_status_cnt[1];
2111}
2112
2113static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2114 void *context, int vl, int mode,
2115 u64 data)
2116{
2117 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2118
2119 return dd->cce_err_status_cnt[0];
2120}
2121
2122/*
2123 * Software counters corresponding to each of the
2124 * error status bits within RcvErrStatus
2125 */
2126static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2127 void *context, int vl, int mode,
2128 u64 data)
2129{
2130 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131
2132 return dd->rcv_err_status_cnt[63];
2133}
2134
2135static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2136 void *context, int vl,
2137 int mode, u64 data)
2138{
2139 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140
2141 return dd->rcv_err_status_cnt[62];
2142}
2143
2144static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2145 void *context, int vl, int mode,
2146 u64 data)
2147{
2148 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149
2150 return dd->rcv_err_status_cnt[61];
2151}
2152
2153static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2154 void *context, int vl, int mode,
2155 u64 data)
2156{
2157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158
2159 return dd->rcv_err_status_cnt[60];
2160}
2161
2162static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2163 void *context, int vl,
2164 int mode, u64 data)
2165{
2166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167
2168 return dd->rcv_err_status_cnt[59];
2169}
2170
2171static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2172 void *context, int vl,
2173 int mode, u64 data)
2174{
2175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176
2177 return dd->rcv_err_status_cnt[58];
2178}
2179
2180static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2181 void *context, int vl, int mode,
2182 u64 data)
2183{
2184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185
2186 return dd->rcv_err_status_cnt[57];
2187}
2188
2189static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2190 void *context, int vl, int mode,
2191 u64 data)
2192{
2193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194
2195 return dd->rcv_err_status_cnt[56];
2196}
2197
2198static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2199 void *context, int vl, int mode,
2200 u64 data)
2201{
2202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203
2204 return dd->rcv_err_status_cnt[55];
2205}
2206
2207static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2208 const struct cntr_entry *entry,
2209 void *context, int vl, int mode, u64 data)
2210{
2211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212
2213 return dd->rcv_err_status_cnt[54];
2214}
2215
2216static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2217 const struct cntr_entry *entry,
2218 void *context, int vl, int mode, u64 data)
2219{
2220 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2221
2222 return dd->rcv_err_status_cnt[53];
2223}
2224
2225static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2226 void *context, int vl,
2227 int mode, u64 data)
2228{
2229 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2230
2231 return dd->rcv_err_status_cnt[52];
2232}
2233
2234static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2235 void *context, int vl,
2236 int mode, u64 data)
2237{
2238 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2239
2240 return dd->rcv_err_status_cnt[51];
2241}
2242
2243static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2244 void *context, int vl,
2245 int mode, u64 data)
2246{
2247 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2248
2249 return dd->rcv_err_status_cnt[50];
2250}
2251
2252static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2253 void *context, int vl,
2254 int mode, u64 data)
2255{
2256 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2257
2258 return dd->rcv_err_status_cnt[49];
2259}
2260
2261static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2262 void *context, int vl,
2263 int mode, u64 data)
2264{
2265 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2266
2267 return dd->rcv_err_status_cnt[48];
2268}
2269
2270static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2271 void *context, int vl,
2272 int mode, u64 data)
2273{
2274 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2275
2276 return dd->rcv_err_status_cnt[47];
2277}
2278
2279static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2280 void *context, int vl, int mode,
2281 u64 data)
2282{
2283 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2284
2285 return dd->rcv_err_status_cnt[46];
2286}
2287
2288static u64 access_rx_hq_intr_csr_parity_err_cnt(
2289 const struct cntr_entry *entry,
2290 void *context, int vl, int mode, u64 data)
2291{
2292 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2293
2294 return dd->rcv_err_status_cnt[45];
2295}
2296
2297static u64 access_rx_lookup_csr_parity_err_cnt(
2298 const struct cntr_entry *entry,
2299 void *context, int vl, int mode, u64 data)
2300{
2301 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2302
2303 return dd->rcv_err_status_cnt[44];
2304}
2305
2306static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2307 const struct cntr_entry *entry,
2308 void *context, int vl, int mode, u64 data)
2309{
2310 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2311
2312 return dd->rcv_err_status_cnt[43];
2313}
2314
2315static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2316 const struct cntr_entry *entry,
2317 void *context, int vl, int mode, u64 data)
2318{
2319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2320
2321 return dd->rcv_err_status_cnt[42];
2322}
2323
2324static u64 access_rx_lookup_des_part2_parity_err_cnt(
2325 const struct cntr_entry *entry,
2326 void *context, int vl, int mode, u64 data)
2327{
2328 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2329
2330 return dd->rcv_err_status_cnt[41];
2331}
2332
2333static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2334 const struct cntr_entry *entry,
2335 void *context, int vl, int mode, u64 data)
2336{
2337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2338
2339 return dd->rcv_err_status_cnt[40];
2340}
2341
2342static u64 access_rx_lookup_des_part1_unc_err_cnt(
2343 const struct cntr_entry *entry,
2344 void *context, int vl, int mode, u64 data)
2345{
2346 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2347
2348 return dd->rcv_err_status_cnt[39];
2349}
2350
2351static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2352 const struct cntr_entry *entry,
2353 void *context, int vl, int mode, u64 data)
2354{
2355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2356
2357 return dd->rcv_err_status_cnt[38];
2358}
2359
2360static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2361 const struct cntr_entry *entry,
2362 void *context, int vl, int mode, u64 data)
2363{
2364 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2365
2366 return dd->rcv_err_status_cnt[37];
2367}
2368
2369static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2370 const struct cntr_entry *entry,
2371 void *context, int vl, int mode, u64 data)
2372{
2373 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2374
2375 return dd->rcv_err_status_cnt[36];
2376}
2377
2378static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2379 const struct cntr_entry *entry,
2380 void *context, int vl, int mode, u64 data)
2381{
2382 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2383
2384 return dd->rcv_err_status_cnt[35];
2385}
2386
2387static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2388 const struct cntr_entry *entry,
2389 void *context, int vl, int mode, u64 data)
2390{
2391 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2392
2393 return dd->rcv_err_status_cnt[34];
2394}
2395
2396static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2397 const struct cntr_entry *entry,
2398 void *context, int vl, int mode, u64 data)
2399{
2400 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2401
2402 return dd->rcv_err_status_cnt[33];
2403}
2404
2405static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2406 void *context, int vl, int mode,
2407 u64 data)
2408{
2409 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2410
2411 return dd->rcv_err_status_cnt[32];
2412}
2413
2414static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2415 void *context, int vl, int mode,
2416 u64 data)
2417{
2418 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2419
2420 return dd->rcv_err_status_cnt[31];
2421}
2422
2423static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2424 void *context, int vl, int mode,
2425 u64 data)
2426{
2427 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2428
2429 return dd->rcv_err_status_cnt[30];
2430}
2431
2432static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2433 void *context, int vl, int mode,
2434 u64 data)
2435{
2436 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2437
2438 return dd->rcv_err_status_cnt[29];
2439}
2440
2441static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2442 void *context, int vl,
2443 int mode, u64 data)
2444{
2445 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2446
2447 return dd->rcv_err_status_cnt[28];
2448}
2449
2450static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2451 const struct cntr_entry *entry,
2452 void *context, int vl, int mode, u64 data)
2453{
2454 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2455
2456 return dd->rcv_err_status_cnt[27];
2457}
2458
2459static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2460 const struct cntr_entry *entry,
2461 void *context, int vl, int mode, u64 data)
2462{
2463 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2464
2465 return dd->rcv_err_status_cnt[26];
2466}
2467
2468static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2469 const struct cntr_entry *entry,
2470 void *context, int vl, int mode, u64 data)
2471{
2472 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2473
2474 return dd->rcv_err_status_cnt[25];
2475}
2476
2477static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2478 const struct cntr_entry *entry,
2479 void *context, int vl, int mode, u64 data)
2480{
2481 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2482
2483 return dd->rcv_err_status_cnt[24];
2484}
2485
2486static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2487 const struct cntr_entry *entry,
2488 void *context, int vl, int mode, u64 data)
2489{
2490 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2491
2492 return dd->rcv_err_status_cnt[23];
2493}
2494
2495static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2496 const struct cntr_entry *entry,
2497 void *context, int vl, int mode, u64 data)
2498{
2499 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2500
2501 return dd->rcv_err_status_cnt[22];
2502}
2503
2504static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2505 const struct cntr_entry *entry,
2506 void *context, int vl, int mode, u64 data)
2507{
2508 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2509
2510 return dd->rcv_err_status_cnt[21];
2511}
2512
2513static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2514 const struct cntr_entry *entry,
2515 void *context, int vl, int mode, u64 data)
2516{
2517 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2518
2519 return dd->rcv_err_status_cnt[20];
2520}
2521
2522static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2523 const struct cntr_entry *entry,
2524 void *context, int vl, int mode, u64 data)
2525{
2526 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2527
2528 return dd->rcv_err_status_cnt[19];
2529}
2530
2531static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2532 void *context, int vl,
2533 int mode, u64 data)
2534{
2535 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2536
2537 return dd->rcv_err_status_cnt[18];
2538}
2539
2540static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2541 void *context, int vl,
2542 int mode, u64 data)
2543{
2544 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2545
2546 return dd->rcv_err_status_cnt[17];
2547}
2548
2549static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2550 const struct cntr_entry *entry,
2551 void *context, int vl, int mode, u64 data)
2552{
2553 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2554
2555 return dd->rcv_err_status_cnt[16];
2556}
2557
2558static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2559 const struct cntr_entry *entry,
2560 void *context, int vl, int mode, u64 data)
2561{
2562 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2563
2564 return dd->rcv_err_status_cnt[15];
2565}
2566
2567static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2568 void *context, int vl,
2569 int mode, u64 data)
2570{
2571 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2572
2573 return dd->rcv_err_status_cnt[14];
2574}
2575
2576static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2577 void *context, int vl,
2578 int mode, u64 data)
2579{
2580 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2581
2582 return dd->rcv_err_status_cnt[13];
2583}
2584
2585static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2586 void *context, int vl, int mode,
2587 u64 data)
2588{
2589 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2590
2591 return dd->rcv_err_status_cnt[12];
2592}
2593
2594static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2595 void *context, int vl, int mode,
2596 u64 data)
2597{
2598 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2599
2600 return dd->rcv_err_status_cnt[11];
2601}
2602
2603static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2604 void *context, int vl, int mode,
2605 u64 data)
2606{
2607 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2608
2609 return dd->rcv_err_status_cnt[10];
2610}
2611
2612static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2613 void *context, int vl, int mode,
2614 u64 data)
2615{
2616 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2617
2618 return dd->rcv_err_status_cnt[9];
2619}
2620
2621static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2622 void *context, int vl, int mode,
2623 u64 data)
2624{
2625 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2626
2627 return dd->rcv_err_status_cnt[8];
2628}
2629
2630static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2631 const struct cntr_entry *entry,
2632 void *context, int vl, int mode, u64 data)
2633{
2634 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2635
2636 return dd->rcv_err_status_cnt[7];
2637}
2638
2639static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2640 const struct cntr_entry *entry,
2641 void *context, int vl, int mode, u64 data)
2642{
2643 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2644
2645 return dd->rcv_err_status_cnt[6];
2646}
2647
2648static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2649 void *context, int vl, int mode,
2650 u64 data)
2651{
2652 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2653
2654 return dd->rcv_err_status_cnt[5];
2655}
2656
2657static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2658 void *context, int vl, int mode,
2659 u64 data)
2660{
2661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2662
2663 return dd->rcv_err_status_cnt[4];
2664}
2665
2666static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2667 void *context, int vl, int mode,
2668 u64 data)
2669{
2670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2671
2672 return dd->rcv_err_status_cnt[3];
2673}
2674
2675static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2676 void *context, int vl, int mode,
2677 u64 data)
2678{
2679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2680
2681 return dd->rcv_err_status_cnt[2];
2682}
2683
2684static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2685 void *context, int vl, int mode,
2686 u64 data)
2687{
2688 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2689
2690 return dd->rcv_err_status_cnt[1];
2691}
2692
2693static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2694 void *context, int vl, int mode,
2695 u64 data)
2696{
2697 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2698
2699 return dd->rcv_err_status_cnt[0];
2700}
2701
2702/*
2703 * Software counters corresponding to each of the
2704 * error status bits within SendPioErrStatus
2705 */
2706static u64 access_pio_pec_sop_head_parity_err_cnt(
2707 const struct cntr_entry *entry,
2708 void *context, int vl, int mode, u64 data)
2709{
2710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711
2712 return dd->send_pio_err_status_cnt[35];
2713}
2714
2715static u64 access_pio_pcc_sop_head_parity_err_cnt(
2716 const struct cntr_entry *entry,
2717 void *context, int vl, int mode, u64 data)
2718{
2719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720
2721 return dd->send_pio_err_status_cnt[34];
2722}
2723
2724static u64 access_pio_last_returned_cnt_parity_err_cnt(
2725 const struct cntr_entry *entry,
2726 void *context, int vl, int mode, u64 data)
2727{
2728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729
2730 return dd->send_pio_err_status_cnt[33];
2731}
2732
2733static u64 access_pio_current_free_cnt_parity_err_cnt(
2734 const struct cntr_entry *entry,
2735 void *context, int vl, int mode, u64 data)
2736{
2737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738
2739 return dd->send_pio_err_status_cnt[32];
2740}
2741
2742static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2743 void *context, int vl, int mode,
2744 u64 data)
2745{
2746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747
2748 return dd->send_pio_err_status_cnt[31];
2749}
2750
2751static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2752 void *context, int vl, int mode,
2753 u64 data)
2754{
2755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756
2757 return dd->send_pio_err_status_cnt[30];
2758}
2759
2760static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2761 void *context, int vl, int mode,
2762 u64 data)
2763{
2764 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765
2766 return dd->send_pio_err_status_cnt[29];
2767}
2768
2769static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2770 const struct cntr_entry *entry,
2771 void *context, int vl, int mode, u64 data)
2772{
2773 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774
2775 return dd->send_pio_err_status_cnt[28];
2776}
2777
2778static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2779 void *context, int vl, int mode,
2780 u64 data)
2781{
2782 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783
2784 return dd->send_pio_err_status_cnt[27];
2785}
2786
2787static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2788 void *context, int vl, int mode,
2789 u64 data)
2790{
2791 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792
2793 return dd->send_pio_err_status_cnt[26];
2794}
2795
2796static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2797 void *context, int vl,
2798 int mode, u64 data)
2799{
2800 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2801
2802 return dd->send_pio_err_status_cnt[25];
2803}
2804
2805static u64 access_pio_block_qw_count_parity_err_cnt(
2806 const struct cntr_entry *entry,
2807 void *context, int vl, int mode, u64 data)
2808{
2809 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2810
2811 return dd->send_pio_err_status_cnt[24];
2812}
2813
2814static u64 access_pio_write_qw_valid_parity_err_cnt(
2815 const struct cntr_entry *entry,
2816 void *context, int vl, int mode, u64 data)
2817{
2818 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2819
2820 return dd->send_pio_err_status_cnt[23];
2821}
2822
2823static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2824 void *context, int vl, int mode,
2825 u64 data)
2826{
2827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2828
2829 return dd->send_pio_err_status_cnt[22];
2830}
2831
2832static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2833 void *context, int vl,
2834 int mode, u64 data)
2835{
2836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2837
2838 return dd->send_pio_err_status_cnt[21];
2839}
2840
2841static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2842 void *context, int vl,
2843 int mode, u64 data)
2844{
2845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2846
2847 return dd->send_pio_err_status_cnt[20];
2848}
2849
2850static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2851 void *context, int vl,
2852 int mode, u64 data)
2853{
2854 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2855
2856 return dd->send_pio_err_status_cnt[19];
2857}
2858
2859static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2860 const struct cntr_entry *entry,
2861 void *context, int vl, int mode, u64 data)
2862{
2863 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2864
2865 return dd->send_pio_err_status_cnt[18];
2866}
2867
2868static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2869 void *context, int vl, int mode,
2870 u64 data)
2871{
2872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2873
2874 return dd->send_pio_err_status_cnt[17];
2875}
2876
2877static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2878 void *context, int vl, int mode,
2879 u64 data)
2880{
2881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2882
2883 return dd->send_pio_err_status_cnt[16];
2884}
2885
2886static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2887 const struct cntr_entry *entry,
2888 void *context, int vl, int mode, u64 data)
2889{
2890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2891
2892 return dd->send_pio_err_status_cnt[15];
2893}
2894
2895static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2896 const struct cntr_entry *entry,
2897 void *context, int vl, int mode, u64 data)
2898{
2899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2900
2901 return dd->send_pio_err_status_cnt[14];
2902}
2903
2904static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2905 const struct cntr_entry *entry,
2906 void *context, int vl, int mode, u64 data)
2907{
2908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2909
2910 return dd->send_pio_err_status_cnt[13];
2911}
2912
2913static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2914 const struct cntr_entry *entry,
2915 void *context, int vl, int mode, u64 data)
2916{
2917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2918
2919 return dd->send_pio_err_status_cnt[12];
2920}
2921
2922static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2923 const struct cntr_entry *entry,
2924 void *context, int vl, int mode, u64 data)
2925{
2926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2927
2928 return dd->send_pio_err_status_cnt[11];
2929}
2930
2931static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2932 const struct cntr_entry *entry,
2933 void *context, int vl, int mode, u64 data)
2934{
2935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2936
2937 return dd->send_pio_err_status_cnt[10];
2938}
2939
2940static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2941 const struct cntr_entry *entry,
2942 void *context, int vl, int mode, u64 data)
2943{
2944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2945
2946 return dd->send_pio_err_status_cnt[9];
2947}
2948
2949static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2950 const struct cntr_entry *entry,
2951 void *context, int vl, int mode, u64 data)
2952{
2953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2954
2955 return dd->send_pio_err_status_cnt[8];
2956}
2957
2958static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2959 const struct cntr_entry *entry,
2960 void *context, int vl, int mode, u64 data)
2961{
2962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2963
2964 return dd->send_pio_err_status_cnt[7];
2965}
2966
2967static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2968 void *context, int vl, int mode,
2969 u64 data)
2970{
2971 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2972
2973 return dd->send_pio_err_status_cnt[6];
2974}
2975
2976static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2977 void *context, int vl, int mode,
2978 u64 data)
2979{
2980 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2981
2982 return dd->send_pio_err_status_cnt[5];
2983}
2984
2985static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2986 void *context, int vl, int mode,
2987 u64 data)
2988{
2989 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2990
2991 return dd->send_pio_err_status_cnt[4];
2992}
2993
2994static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2995 void *context, int vl, int mode,
2996 u64 data)
2997{
2998 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2999
3000 return dd->send_pio_err_status_cnt[3];
3001}
3002
3003static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3004 void *context, int vl, int mode,
3005 u64 data)
3006{
3007 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3008
3009 return dd->send_pio_err_status_cnt[2];
3010}
3011
3012static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3013 void *context, int vl,
3014 int mode, u64 data)
3015{
3016 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3017
3018 return dd->send_pio_err_status_cnt[1];
3019}
3020
3021static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3022 void *context, int vl, int mode,
3023 u64 data)
3024{
3025 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3026
3027 return dd->send_pio_err_status_cnt[0];
3028}
3029
3030/*
3031 * Software counters corresponding to each of the
3032 * error status bits within SendDmaErrStatus
3033 */
3034static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3035 const struct cntr_entry *entry,
3036 void *context, int vl, int mode, u64 data)
3037{
3038 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039
3040 return dd->send_dma_err_status_cnt[3];
3041}
3042
3043static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3044 const struct cntr_entry *entry,
3045 void *context, int vl, int mode, u64 data)
3046{
3047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048
3049 return dd->send_dma_err_status_cnt[2];
3050}
3051
3052static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3053 void *context, int vl, int mode,
3054 u64 data)
3055{
3056 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3057
3058 return dd->send_dma_err_status_cnt[1];
3059}
3060
3061static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3062 void *context, int vl, int mode,
3063 u64 data)
3064{
3065 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3066
3067 return dd->send_dma_err_status_cnt[0];
3068}
3069
3070/*
3071 * Software counters corresponding to each of the
3072 * error status bits within SendEgressErrStatus
3073 */
3074static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3075 const struct cntr_entry *entry,
3076 void *context, int vl, int mode, u64 data)
3077{
3078 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3079
3080 return dd->send_egress_err_status_cnt[63];
3081}
3082
3083static u64 access_tx_read_sdma_memory_csr_err_cnt(
3084 const struct cntr_entry *entry,
3085 void *context, int vl, int mode, u64 data)
3086{
3087 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3088
3089 return dd->send_egress_err_status_cnt[62];
3090}
3091
3092static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3093 void *context, int vl, int mode,
3094 u64 data)
3095{
3096 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3097
3098 return dd->send_egress_err_status_cnt[61];
3099}
3100
3101static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3102 void *context, int vl,
3103 int mode, u64 data)
3104{
3105 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3106
3107 return dd->send_egress_err_status_cnt[60];
3108}
3109
3110static u64 access_tx_read_sdma_memory_cor_err_cnt(
3111 const struct cntr_entry *entry,
3112 void *context, int vl, int mode, u64 data)
3113{
3114 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3115
3116 return dd->send_egress_err_status_cnt[59];
3117}
3118
3119static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3120 void *context, int vl, int mode,
3121 u64 data)
3122{
3123 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3124
3125 return dd->send_egress_err_status_cnt[58];
3126}
3127
3128static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3129 void *context, int vl, int mode,
3130 u64 data)
3131{
3132 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133
3134 return dd->send_egress_err_status_cnt[57];
3135}
3136
3137static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3138 void *context, int vl, int mode,
3139 u64 data)
3140{
3141 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142
3143 return dd->send_egress_err_status_cnt[56];
3144}
3145
3146static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3147 void *context, int vl, int mode,
3148 u64 data)
3149{
3150 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151
3152 return dd->send_egress_err_status_cnt[55];
3153}
3154
3155static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3156 void *context, int vl, int mode,
3157 u64 data)
3158{
3159 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160
3161 return dd->send_egress_err_status_cnt[54];
3162}
3163
3164static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3165 void *context, int vl, int mode,
3166 u64 data)
3167{
3168 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3169
3170 return dd->send_egress_err_status_cnt[53];
3171}
3172
3173static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3174 void *context, int vl, int mode,
3175 u64 data)
3176{
3177 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3178
3179 return dd->send_egress_err_status_cnt[52];
3180}
3181
3182static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3183 void *context, int vl, int mode,
3184 u64 data)
3185{
3186 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3187
3188 return dd->send_egress_err_status_cnt[51];
3189}
3190
3191static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3192 void *context, int vl, int mode,
3193 u64 data)
3194{
3195 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3196
3197 return dd->send_egress_err_status_cnt[50];
3198}
3199
3200static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3201 void *context, int vl, int mode,
3202 u64 data)
3203{
3204 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3205
3206 return dd->send_egress_err_status_cnt[49];
3207}
3208
3209static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3210 void *context, int vl, int mode,
3211 u64 data)
3212{
3213 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3214
3215 return dd->send_egress_err_status_cnt[48];
3216}
3217
3218static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3219 void *context, int vl, int mode,
3220 u64 data)
3221{
3222 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3223
3224 return dd->send_egress_err_status_cnt[47];
3225}
3226
3227static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3228 void *context, int vl, int mode,
3229 u64 data)
3230{
3231 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3232
3233 return dd->send_egress_err_status_cnt[46];
3234}
3235
3236static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3237 void *context, int vl, int mode,
3238 u64 data)
3239{
3240 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3241
3242 return dd->send_egress_err_status_cnt[45];
3243}
3244
3245static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3246 void *context, int vl,
3247 int mode, u64 data)
3248{
3249 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3250
3251 return dd->send_egress_err_status_cnt[44];
3252}
3253
3254static u64 access_tx_read_sdma_memory_unc_err_cnt(
3255 const struct cntr_entry *entry,
3256 void *context, int vl, int mode, u64 data)
3257{
3258 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3259
3260 return dd->send_egress_err_status_cnt[43];
3261}
3262
3263static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3264 void *context, int vl, int mode,
3265 u64 data)
3266{
3267 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3268
3269 return dd->send_egress_err_status_cnt[42];
3270}
3271
3272static u64 access_tx_credit_return_partiy_err_cnt(
3273 const struct cntr_entry *entry,
3274 void *context, int vl, int mode, u64 data)
3275{
3276 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3277
3278 return dd->send_egress_err_status_cnt[41];
3279}
3280
3281static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3282 const struct cntr_entry *entry,
3283 void *context, int vl, int mode, u64 data)
3284{
3285 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3286
3287 return dd->send_egress_err_status_cnt[40];
3288}
3289
3290static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3291 const struct cntr_entry *entry,
3292 void *context, int vl, int mode, u64 data)
3293{
3294 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3295
3296 return dd->send_egress_err_status_cnt[39];
3297}
3298
3299static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3300 const struct cntr_entry *entry,
3301 void *context, int vl, int mode, u64 data)
3302{
3303 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3304
3305 return dd->send_egress_err_status_cnt[38];
3306}
3307
3308static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3309 const struct cntr_entry *entry,
3310 void *context, int vl, int mode, u64 data)
3311{
3312 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3313
3314 return dd->send_egress_err_status_cnt[37];
3315}
3316
3317static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3318 const struct cntr_entry *entry,
3319 void *context, int vl, int mode, u64 data)
3320{
3321 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3322
3323 return dd->send_egress_err_status_cnt[36];
3324}
3325
3326static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3327 const struct cntr_entry *entry,
3328 void *context, int vl, int mode, u64 data)
3329{
3330 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3331
3332 return dd->send_egress_err_status_cnt[35];
3333}
3334
3335static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3336 const struct cntr_entry *entry,
3337 void *context, int vl, int mode, u64 data)
3338{
3339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3340
3341 return dd->send_egress_err_status_cnt[34];
3342}
3343
3344static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3345 const struct cntr_entry *entry,
3346 void *context, int vl, int mode, u64 data)
3347{
3348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3349
3350 return dd->send_egress_err_status_cnt[33];
3351}
3352
3353static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3354 const struct cntr_entry *entry,
3355 void *context, int vl, int mode, u64 data)
3356{
3357 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3358
3359 return dd->send_egress_err_status_cnt[32];
3360}
3361
3362static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3363 const struct cntr_entry *entry,
3364 void *context, int vl, int mode, u64 data)
3365{
3366 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3367
3368 return dd->send_egress_err_status_cnt[31];
3369}
3370
3371static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3372 const struct cntr_entry *entry,
3373 void *context, int vl, int mode, u64 data)
3374{
3375 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3376
3377 return dd->send_egress_err_status_cnt[30];
3378}
3379
3380static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3381 const struct cntr_entry *entry,
3382 void *context, int vl, int mode, u64 data)
3383{
3384 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3385
3386 return dd->send_egress_err_status_cnt[29];
3387}
3388
3389static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3390 const struct cntr_entry *entry,
3391 void *context, int vl, int mode, u64 data)
3392{
3393 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3394
3395 return dd->send_egress_err_status_cnt[28];
3396}
3397
3398static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3399 const struct cntr_entry *entry,
3400 void *context, int vl, int mode, u64 data)
3401{
3402 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3403
3404 return dd->send_egress_err_status_cnt[27];
3405}
3406
3407static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3408 const struct cntr_entry *entry,
3409 void *context, int vl, int mode, u64 data)
3410{
3411 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3412
3413 return dd->send_egress_err_status_cnt[26];
3414}
3415
3416static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3417 const struct cntr_entry *entry,
3418 void *context, int vl, int mode, u64 data)
3419{
3420 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3421
3422 return dd->send_egress_err_status_cnt[25];
3423}
3424
3425static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3426 const struct cntr_entry *entry,
3427 void *context, int vl, int mode, u64 data)
3428{
3429 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3430
3431 return dd->send_egress_err_status_cnt[24];
3432}
3433
3434static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3435 const struct cntr_entry *entry,
3436 void *context, int vl, int mode, u64 data)
3437{
3438 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3439
3440 return dd->send_egress_err_status_cnt[23];
3441}
3442
3443static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3444 const struct cntr_entry *entry,
3445 void *context, int vl, int mode, u64 data)
3446{
3447 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3448
3449 return dd->send_egress_err_status_cnt[22];
3450}
3451
3452static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3453 const struct cntr_entry *entry,
3454 void *context, int vl, int mode, u64 data)
3455{
3456 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3457
3458 return dd->send_egress_err_status_cnt[21];
3459}
3460
3461static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3462 const struct cntr_entry *entry,
3463 void *context, int vl, int mode, u64 data)
3464{
3465 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3466
3467 return dd->send_egress_err_status_cnt[20];
3468}
3469
3470static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3471 const struct cntr_entry *entry,
3472 void *context, int vl, int mode, u64 data)
3473{
3474 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3475
3476 return dd->send_egress_err_status_cnt[19];
3477}
3478
3479static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3480 const struct cntr_entry *entry,
3481 void *context, int vl, int mode, u64 data)
3482{
3483 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3484
3485 return dd->send_egress_err_status_cnt[18];
3486}
3487
3488static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3489 const struct cntr_entry *entry,
3490 void *context, int vl, int mode, u64 data)
3491{
3492 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3493
3494 return dd->send_egress_err_status_cnt[17];
3495}
3496
3497static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3498 const struct cntr_entry *entry,
3499 void *context, int vl, int mode, u64 data)
3500{
3501 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3502
3503 return dd->send_egress_err_status_cnt[16];
3504}
3505
3506static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3507 void *context, int vl, int mode,
3508 u64 data)
3509{
3510 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3511
3512 return dd->send_egress_err_status_cnt[15];
3513}
3514
3515static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3516 void *context, int vl,
3517 int mode, u64 data)
3518{
3519 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3520
3521 return dd->send_egress_err_status_cnt[14];
3522}
3523
3524static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3525 void *context, int vl, int mode,
3526 u64 data)
3527{
3528 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3529
3530 return dd->send_egress_err_status_cnt[13];
3531}
3532
3533static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3534 void *context, int vl, int mode,
3535 u64 data)
3536{
3537 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3538
3539 return dd->send_egress_err_status_cnt[12];
3540}
3541
3542static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3543 const struct cntr_entry *entry,
3544 void *context, int vl, int mode, u64 data)
3545{
3546 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3547
3548 return dd->send_egress_err_status_cnt[11];
3549}
3550
3551static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3552 void *context, int vl, int mode,
3553 u64 data)
3554{
3555 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3556
3557 return dd->send_egress_err_status_cnt[10];
3558}
3559
3560static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3561 void *context, int vl, int mode,
3562 u64 data)
3563{
3564 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3565
3566 return dd->send_egress_err_status_cnt[9];
3567}
3568
3569static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3570 const struct cntr_entry *entry,
3571 void *context, int vl, int mode, u64 data)
3572{
3573 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3574
3575 return dd->send_egress_err_status_cnt[8];
3576}
3577
3578static u64 access_tx_pio_launch_intf_parity_err_cnt(
3579 const struct cntr_entry *entry,
3580 void *context, int vl, int mode, u64 data)
3581{
3582 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3583
3584 return dd->send_egress_err_status_cnt[7];
3585}
3586
3587static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3588 void *context, int vl, int mode,
3589 u64 data)
3590{
3591 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3592
3593 return dd->send_egress_err_status_cnt[6];
3594}
3595
3596static u64 access_tx_incorrect_link_state_err_cnt(
3597 const struct cntr_entry *entry,
3598 void *context, int vl, int mode, u64 data)
3599{
3600 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3601
3602 return dd->send_egress_err_status_cnt[5];
3603}
3604
3605static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3606 void *context, int vl, int mode,
3607 u64 data)
3608{
3609 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3610
3611 return dd->send_egress_err_status_cnt[4];
3612}
3613
3614static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3615 const struct cntr_entry *entry,
3616 void *context, int vl, int mode, u64 data)
3617{
3618 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3619
3620 return dd->send_egress_err_status_cnt[3];
3621}
3622
3623static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3624 void *context, int vl, int mode,
3625 u64 data)
3626{
3627 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3628
3629 return dd->send_egress_err_status_cnt[2];
3630}
3631
3632static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3633 const struct cntr_entry *entry,
3634 void *context, int vl, int mode, u64 data)
3635{
3636 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3637
3638 return dd->send_egress_err_status_cnt[1];
3639}
3640
3641static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3642 const struct cntr_entry *entry,
3643 void *context, int vl, int mode, u64 data)
3644{
3645 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3646
3647 return dd->send_egress_err_status_cnt[0];
3648}
3649
3650/*
3651 * Software counters corresponding to each of the
3652 * error status bits within SendErrStatus
3653 */
3654static u64 access_send_csr_write_bad_addr_err_cnt(
3655 const struct cntr_entry *entry,
3656 void *context, int vl, int mode, u64 data)
3657{
3658 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659
3660 return dd->send_err_status_cnt[2];
3661}
3662
3663static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3664 void *context, int vl,
3665 int mode, u64 data)
3666{
3667 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3668
3669 return dd->send_err_status_cnt[1];
3670}
3671
3672static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3673 void *context, int vl, int mode,
3674 u64 data)
3675{
3676 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3677
3678 return dd->send_err_status_cnt[0];
3679}
3680
3681/*
3682 * Software counters corresponding to each of the
3683 * error status bits within SendCtxtErrStatus
3684 */
3685static u64 access_pio_write_out_of_bounds_err_cnt(
3686 const struct cntr_entry *entry,
3687 void *context, int vl, int mode, u64 data)
3688{
3689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3690
3691 return dd->sw_ctxt_err_status_cnt[4];
3692}
3693
3694static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3695 void *context, int vl, int mode,
3696 u64 data)
3697{
3698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3699
3700 return dd->sw_ctxt_err_status_cnt[3];
3701}
3702
3703static u64 access_pio_write_crosses_boundary_err_cnt(
3704 const struct cntr_entry *entry,
3705 void *context, int vl, int mode, u64 data)
3706{
3707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3708
3709 return dd->sw_ctxt_err_status_cnt[2];
3710}
3711
3712static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3713 void *context, int vl,
3714 int mode, u64 data)
3715{
3716 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3717
3718 return dd->sw_ctxt_err_status_cnt[1];
3719}
3720
3721static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3722 void *context, int vl, int mode,
3723 u64 data)
3724{
3725 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3726
3727 return dd->sw_ctxt_err_status_cnt[0];
3728}
3729
3730/*
3731 * Software counters corresponding to each of the
3732 * error status bits within SendDmaEngErrStatus
3733 */
3734static u64 access_sdma_header_request_fifo_cor_err_cnt(
3735 const struct cntr_entry *entry,
3736 void *context, int vl, int mode, u64 data)
3737{
3738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3739
3740 return dd->sw_send_dma_eng_err_status_cnt[23];
3741}
3742
3743static u64 access_sdma_header_storage_cor_err_cnt(
3744 const struct cntr_entry *entry,
3745 void *context, int vl, int mode, u64 data)
3746{
3747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3748
3749 return dd->sw_send_dma_eng_err_status_cnt[22];
3750}
3751
3752static u64 access_sdma_packet_tracking_cor_err_cnt(
3753 const struct cntr_entry *entry,
3754 void *context, int vl, int mode, u64 data)
3755{
3756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3757
3758 return dd->sw_send_dma_eng_err_status_cnt[21];
3759}
3760
3761static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3762 void *context, int vl, int mode,
3763 u64 data)
3764{
3765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3766
3767 return dd->sw_send_dma_eng_err_status_cnt[20];
3768}
3769
3770static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3771 void *context, int vl, int mode,
3772 u64 data)
3773{
3774 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3775
3776 return dd->sw_send_dma_eng_err_status_cnt[19];
3777}
3778
3779static u64 access_sdma_header_request_fifo_unc_err_cnt(
3780 const struct cntr_entry *entry,
3781 void *context, int vl, int mode, u64 data)
3782{
3783 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784
3785 return dd->sw_send_dma_eng_err_status_cnt[18];
3786}
3787
3788static u64 access_sdma_header_storage_unc_err_cnt(
3789 const struct cntr_entry *entry,
3790 void *context, int vl, int mode, u64 data)
3791{
3792 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793
3794 return dd->sw_send_dma_eng_err_status_cnt[17];
3795}
3796
3797static u64 access_sdma_packet_tracking_unc_err_cnt(
3798 const struct cntr_entry *entry,
3799 void *context, int vl, int mode, u64 data)
3800{
3801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802
3803 return dd->sw_send_dma_eng_err_status_cnt[16];
3804}
3805
3806static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3807 void *context, int vl, int mode,
3808 u64 data)
3809{
3810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811
3812 return dd->sw_send_dma_eng_err_status_cnt[15];
3813}
3814
3815static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3816 void *context, int vl, int mode,
3817 u64 data)
3818{
3819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820
3821 return dd->sw_send_dma_eng_err_status_cnt[14];
3822}
3823
3824static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3825 void *context, int vl, int mode,
3826 u64 data)
3827{
3828 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3829
3830 return dd->sw_send_dma_eng_err_status_cnt[13];
3831}
3832
3833static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3834 void *context, int vl, int mode,
3835 u64 data)
3836{
3837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3838
3839 return dd->sw_send_dma_eng_err_status_cnt[12];
3840}
3841
3842static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3843 void *context, int vl, int mode,
3844 u64 data)
3845{
3846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3847
3848 return dd->sw_send_dma_eng_err_status_cnt[11];
3849}
3850
3851static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3852 void *context, int vl, int mode,
3853 u64 data)
3854{
3855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3856
3857 return dd->sw_send_dma_eng_err_status_cnt[10];
3858}
3859
3860static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3861 void *context, int vl, int mode,
3862 u64 data)
3863{
3864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3865
3866 return dd->sw_send_dma_eng_err_status_cnt[9];
3867}
3868
3869static u64 access_sdma_packet_desc_overflow_err_cnt(
3870 const struct cntr_entry *entry,
3871 void *context, int vl, int mode, u64 data)
3872{
3873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3874
3875 return dd->sw_send_dma_eng_err_status_cnt[8];
3876}
3877
3878static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3879 void *context, int vl,
3880 int mode, u64 data)
3881{
3882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3883
3884 return dd->sw_send_dma_eng_err_status_cnt[7];
3885}
3886
3887static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3888 void *context, int vl, int mode, u64 data)
3889{
3890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3891
3892 return dd->sw_send_dma_eng_err_status_cnt[6];
3893}
3894
3895static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3896 void *context, int vl, int mode,
3897 u64 data)
3898{
3899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3900
3901 return dd->sw_send_dma_eng_err_status_cnt[5];
3902}
3903
3904static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3905 void *context, int vl, int mode,
3906 u64 data)
3907{
3908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3909
3910 return dd->sw_send_dma_eng_err_status_cnt[4];
3911}
3912
3913static u64 access_sdma_tail_out_of_bounds_err_cnt(
3914 const struct cntr_entry *entry,
3915 void *context, int vl, int mode, u64 data)
3916{
3917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3918
3919 return dd->sw_send_dma_eng_err_status_cnt[3];
3920}
3921
3922static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3923 void *context, int vl, int mode,
3924 u64 data)
3925{
3926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3927
3928 return dd->sw_send_dma_eng_err_status_cnt[2];
3929}
3930
3931static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3932 void *context, int vl, int mode,
3933 u64 data)
3934{
3935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3936
3937 return dd->sw_send_dma_eng_err_status_cnt[1];
3938}
3939
3940static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3941 void *context, int vl, int mode,
3942 u64 data)
3943{
3944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3945
3946 return dd->sw_send_dma_eng_err_status_cnt[0];
3947}
3948
3949#define def_access_sw_cpu(cntr) \
3950static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3951 void *context, int vl, int mode, u64 data) \
3952{ \
3953 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3954 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3955 ppd->ibport_data.rvp.cntr, vl, \
3956 mode, data); \
3957}
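/*
 * For illustration, def_access_sw_cpu(rc_acks) expands to roughly:
 *
 *   static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *                                    void *context, int vl, int mode,
 *                                    u64 data)
 *   {
 *           struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *           return read_write_cpu(ppd->dd,
 *                                 &ppd->ibport_data.rvp.z_rc_acks,
 *                                 ppd->ibport_data.rvp.rc_acks, vl,
 *                                 mode, data);
 *   }
 *
 * i.e. one accessor per per-CPU port counter, passed together with its
 * z_ companion value.
 */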
3958
3959def_access_sw_cpu(rc_acks);
3960def_access_sw_cpu(rc_qacks);
3961def_access_sw_cpu(rc_delayed_comp);
3962
3963#define def_access_ibp_counter(cntr) \
3964static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3965 void *context, int vl, int mode, u64 data) \
3966{ \
3967 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3968 \
3969 if (vl != CNTR_INVALID_VL) \
3970 return 0; \
3971 \
3972 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
3973 mode, data); \
3974}
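/*
 * For illustration, def_access_ibp_counter(loop_pkts) expands to roughly:
 *
 *   static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *                                   void *context, int vl, int mode,
 *                                   u64 data)
 *   {
 *           struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *           if (vl != CNTR_INVALID_VL)
 *                   return 0;
 *
 *           return read_write_sw(ppd->dd,
 *                                &ppd->ibport_data.rvp.n_loop_pkts,
 *                                mode, data);
 *   }
 *
 * i.e. a per-port software counter that is not broken out per VL.
 */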
3975
3976def_access_ibp_counter(loop_pkts);
3977def_access_ibp_counter(rc_resends);
3978def_access_ibp_counter(rnr_naks);
3979def_access_ibp_counter(other_naks);
3980def_access_ibp_counter(rc_timeouts);
3981def_access_ibp_counter(pkt_drops);
3982def_access_ibp_counter(dmawait);
3983def_access_ibp_counter(rc_seqnak);
3984def_access_ibp_counter(rc_dupreq);
3985def_access_ibp_counter(rdma_seq);
3986def_access_ibp_counter(unaligned);
3987def_access_ibp_counter(seq_naks);
3988
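/*
 * dev_cntrs[] is indexed by the C_* device counter enum.  Each entry is
 * built with CNTR_ELEM() or one of its wrappers (RXE32_DEV_CNTR_ELEM,
 * CCE_*_DEV_CNTR_ELEM, DC_PERF_CNTR*, ...) and pairs a display name with
 * either a CSR to read or a software access routine; flags such as
 * CNTR_NORMAL, CNTR_SYNTH, CNTR_VL, CNTR_32BIT and CNTR_SDMA are presumed
 * to tell the generic counter read path how the value is gathered.
 */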
3989static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3990[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3991[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3992 CNTR_NORMAL),
3993[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3994 CNTR_NORMAL),
3995[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3996 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3997 CNTR_NORMAL),
3998[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3999 CNTR_NORMAL),
4000[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4001 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4002[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4003 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4004[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4005 CNTR_NORMAL),
4006[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4007 CNTR_NORMAL),
4008[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4009 CNTR_NORMAL),
4010[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4011 CNTR_NORMAL),
4012[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4013 CNTR_NORMAL),
4014[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4015 CNTR_NORMAL),
4016[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4017 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4018[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4019 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4020[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4021 CNTR_SYNTH),
4022[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4023[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4024 CNTR_SYNTH),
4025[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4026 CNTR_SYNTH),
4027[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4028 CNTR_SYNTH),
4029[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4030 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4031[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4032 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4033 CNTR_SYNTH),
4034[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4035 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4036[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4037 CNTR_SYNTH),
4038[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4039 CNTR_SYNTH),
4040[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4041 CNTR_SYNTH),
4042[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4043 CNTR_SYNTH),
4044[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4045 CNTR_SYNTH),
4046[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4047 CNTR_SYNTH),
4048[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4049 CNTR_SYNTH),
4050[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4051 CNTR_SYNTH | CNTR_VL),
4052[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4053 CNTR_SYNTH | CNTR_VL),
4054[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4055[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4056 CNTR_SYNTH | CNTR_VL),
4057[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4058[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4059 CNTR_SYNTH | CNTR_VL),
4060[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4061 CNTR_SYNTH),
4062[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4063 CNTR_SYNTH | CNTR_VL),
4064[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4065 CNTR_SYNTH),
4066[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4067 CNTR_SYNTH | CNTR_VL),
4068[C_DC_TOTAL_CRC] =
4069 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4070 CNTR_SYNTH),
4071[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4072 CNTR_SYNTH),
4073[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4074 CNTR_SYNTH),
4075[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4076 CNTR_SYNTH),
4077[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4078 CNTR_SYNTH),
4079[C_DC_CRC_MULT_LN] =
4080 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4081 CNTR_SYNTH),
4082[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4083 CNTR_SYNTH),
4084[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4085 CNTR_SYNTH),
4086[C_DC_SEQ_CRC_CNT] =
4087 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4088 CNTR_SYNTH),
4089[C_DC_ESC0_ONLY_CNT] =
4090 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4091 CNTR_SYNTH),
4092[C_DC_ESC0_PLUS1_CNT] =
4093 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4094 CNTR_SYNTH),
4095[C_DC_ESC0_PLUS2_CNT] =
4096 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4097 CNTR_SYNTH),
4098[C_DC_REINIT_FROM_PEER_CNT] =
4099 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4100 CNTR_SYNTH),
4101[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4102 CNTR_SYNTH),
4103[C_DC_MISC_FLG_CNT] =
4104 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4105 CNTR_SYNTH),
4106[C_DC_PRF_GOOD_LTP_CNT] =
4107 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4108[C_DC_PRF_ACCEPTED_LTP_CNT] =
4109 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4110 CNTR_SYNTH),
4111[C_DC_PRF_RX_FLIT_CNT] =
4112 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4113[C_DC_PRF_TX_FLIT_CNT] =
4114 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4115[C_DC_PRF_CLK_CNTR] =
4116 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4117[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4118 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4119[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4120 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4121 CNTR_SYNTH),
4122[C_DC_PG_STS_TX_SBE_CNT] =
4123 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4124[C_DC_PG_STS_TX_MBE_CNT] =
4125 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4126 CNTR_SYNTH),
4127[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4128 access_sw_cpu_intr),
4129[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4130 access_sw_cpu_rcv_limit),
4131[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4132 access_sw_vtx_wait),
4133[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4134 access_sw_pio_wait),
4135[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4136 access_sw_pio_drain),
4137[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4138 access_sw_kmem_wait),
4139[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4140 access_sw_send_schedule),
4141[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4142 SEND_DMA_DESC_FETCHED_CNT, 0,
4143 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4144 dev_access_u32_csr),
4145[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4146 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4147 access_sde_int_cnt),
4148[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4149 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4150 access_sde_err_cnt),
4151[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4152 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4153 access_sde_idle_int_cnt),
4154[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4155 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4156 access_sde_progress_int_cnt),
4157/* MISC_ERR_STATUS */
4158[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4159 CNTR_NORMAL,
4160 access_misc_pll_lock_fail_err_cnt),
4161[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4162 CNTR_NORMAL,
4163 access_misc_mbist_fail_err_cnt),
4164[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4165 CNTR_NORMAL,
4166 access_misc_invalid_eep_cmd_err_cnt),
4167[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4168 CNTR_NORMAL,
4169 access_misc_efuse_done_parity_err_cnt),
4170[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4171 CNTR_NORMAL,
4172 access_misc_efuse_write_err_cnt),
4173[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4174 0, CNTR_NORMAL,
4175 access_misc_efuse_read_bad_addr_err_cnt),
4176[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4177 CNTR_NORMAL,
4178 access_misc_efuse_csr_parity_err_cnt),
4179[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4180 CNTR_NORMAL,
4181 access_misc_fw_auth_failed_err_cnt),
4182[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4183 CNTR_NORMAL,
4184 access_misc_key_mismatch_err_cnt),
4185[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4186 CNTR_NORMAL,
4187 access_misc_sbus_write_failed_err_cnt),
4188[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4189 CNTR_NORMAL,
4190 access_misc_csr_write_bad_addr_err_cnt),
4191[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4192 CNTR_NORMAL,
4193 access_misc_csr_read_bad_addr_err_cnt),
4194[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4195 CNTR_NORMAL,
4196 access_misc_csr_parity_err_cnt),
4197/* CceErrStatus */
4198[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4199 CNTR_NORMAL,
4200 access_sw_cce_err_status_aggregated_cnt),
4201[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4202 CNTR_NORMAL,
4203 access_cce_msix_csr_parity_err_cnt),
4204[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4205 CNTR_NORMAL,
4206 access_cce_int_map_unc_err_cnt),
4207[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4208 CNTR_NORMAL,
4209 access_cce_int_map_cor_err_cnt),
4210[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4211 CNTR_NORMAL,
4212 access_cce_msix_table_unc_err_cnt),
4213[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4214 CNTR_NORMAL,
4215 access_cce_msix_table_cor_err_cnt),
4216[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4217 0, CNTR_NORMAL,
4218 access_cce_rxdma_conv_fifo_parity_err_cnt),
4219[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4220 0, CNTR_NORMAL,
4221 access_cce_rcpl_async_fifo_parity_err_cnt),
4222[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4223 CNTR_NORMAL,
4224 access_cce_seg_write_bad_addr_err_cnt),
4225[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4226 CNTR_NORMAL,
4227 access_cce_seg_read_bad_addr_err_cnt),
4228[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4229 CNTR_NORMAL,
4230 access_la_triggered_cnt),
4231[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4232 CNTR_NORMAL,
4233 access_cce_trgt_cpl_timeout_err_cnt),
4234[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4235 CNTR_NORMAL,
4236 access_pcic_receive_parity_err_cnt),
4237[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4238 CNTR_NORMAL,
4239 access_pcic_transmit_back_parity_err_cnt),
4240[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4241 0, CNTR_NORMAL,
4242 access_pcic_transmit_front_parity_err_cnt),
4243[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4244 CNTR_NORMAL,
4245 access_pcic_cpl_dat_q_unc_err_cnt),
4246[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4247 CNTR_NORMAL,
4248 access_pcic_cpl_hd_q_unc_err_cnt),
4249[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4250 CNTR_NORMAL,
4251 access_pcic_post_dat_q_unc_err_cnt),
4252[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4253 CNTR_NORMAL,
4254 access_pcic_post_hd_q_unc_err_cnt),
4255[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4256 CNTR_NORMAL,
4257 access_pcic_retry_sot_mem_unc_err_cnt),
4258[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4259 CNTR_NORMAL,
4260 access_pcic_retry_mem_unc_err),
4261[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4262 CNTR_NORMAL,
4263 access_pcic_n_post_dat_q_parity_err_cnt),
4264[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4265 CNTR_NORMAL,
4266 access_pcic_n_post_h_q_parity_err_cnt),
4267[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4268 CNTR_NORMAL,
4269 access_pcic_cpl_dat_q_cor_err_cnt),
4270[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4271 CNTR_NORMAL,
4272 access_pcic_cpl_hd_q_cor_err_cnt),
4273[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4274 CNTR_NORMAL,
4275 access_pcic_post_dat_q_cor_err_cnt),
4276[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4277 CNTR_NORMAL,
4278 access_pcic_post_hd_q_cor_err_cnt),
4279[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4280 CNTR_NORMAL,
4281 access_pcic_retry_sot_mem_cor_err_cnt),
4282[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4283 CNTR_NORMAL,
4284 access_pcic_retry_mem_cor_err_cnt),
4285[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4286 "CceCli1AsyncFifoDbgParityError", 0, 0,
4287 CNTR_NORMAL,
4288 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4289[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4290 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4291 CNTR_NORMAL,
4292 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4293 ),
4294[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4295 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4296 CNTR_NORMAL,
4297 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4298[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4299 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4300 CNTR_NORMAL,
4301 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4302[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4303 0, CNTR_NORMAL,
4304 access_cce_cli2_async_fifo_parity_err_cnt),
4305[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4306 CNTR_NORMAL,
4307 access_cce_csr_cfg_bus_parity_err_cnt),
4308[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4309 0, CNTR_NORMAL,
4310 access_cce_cli0_async_fifo_parity_err_cnt),
4311[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4312 CNTR_NORMAL,
4313 access_cce_rspd_data_parity_err_cnt),
4314[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4315 CNTR_NORMAL,
4316 access_cce_trgt_access_err_cnt),
4317[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4318 0, CNTR_NORMAL,
4319 access_cce_trgt_async_fifo_parity_err_cnt),
4320[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4321 CNTR_NORMAL,
4322 access_cce_csr_write_bad_addr_err_cnt),
4323[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4324 CNTR_NORMAL,
4325 access_cce_csr_read_bad_addr_err_cnt),
4326[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4327 CNTR_NORMAL,
4328 access_ccs_csr_parity_err_cnt),
4329
4330/* RcvErrStatus */
4331[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4332 CNTR_NORMAL,
4333 access_rx_csr_parity_err_cnt),
4334[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4335 CNTR_NORMAL,
4336 access_rx_csr_write_bad_addr_err_cnt),
4337[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4338 CNTR_NORMAL,
4339 access_rx_csr_read_bad_addr_err_cnt),
4340[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4341 CNTR_NORMAL,
4342 access_rx_dma_csr_unc_err_cnt),
4343[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4344 CNTR_NORMAL,
4345 access_rx_dma_dq_fsm_encoding_err_cnt),
4346[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4347 CNTR_NORMAL,
4348 access_rx_dma_eq_fsm_encoding_err_cnt),
4349[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4350 CNTR_NORMAL,
4351 access_rx_dma_csr_parity_err_cnt),
4352[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4353 CNTR_NORMAL,
4354 access_rx_rbuf_data_cor_err_cnt),
4355[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4356 CNTR_NORMAL,
4357 access_rx_rbuf_data_unc_err_cnt),
4358[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4359 CNTR_NORMAL,
4360 access_rx_dma_data_fifo_rd_cor_err_cnt),
4361[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4362 CNTR_NORMAL,
4363 access_rx_dma_data_fifo_rd_unc_err_cnt),
4364[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4365 CNTR_NORMAL,
4366 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4367[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4368 CNTR_NORMAL,
4369 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4370[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4371 CNTR_NORMAL,
4372 access_rx_rbuf_desc_part2_cor_err_cnt),
4373[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4374 CNTR_NORMAL,
4375 access_rx_rbuf_desc_part2_unc_err_cnt),
4376[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4377 CNTR_NORMAL,
4378 access_rx_rbuf_desc_part1_cor_err_cnt),
4379[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4380 CNTR_NORMAL,
4381 access_rx_rbuf_desc_part1_unc_err_cnt),
4382[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4383 CNTR_NORMAL,
4384 access_rx_hq_intr_fsm_err_cnt),
4385[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4386 CNTR_NORMAL,
4387 access_rx_hq_intr_csr_parity_err_cnt),
4388[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4389 CNTR_NORMAL,
4390 access_rx_lookup_csr_parity_err_cnt),
4391[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4392 CNTR_NORMAL,
4393 access_rx_lookup_rcv_array_cor_err_cnt),
4394[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4395 CNTR_NORMAL,
4396 access_rx_lookup_rcv_array_unc_err_cnt),
4397[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4398 0, CNTR_NORMAL,
4399 access_rx_lookup_des_part2_parity_err_cnt),
4400[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4401 0, CNTR_NORMAL,
4402 access_rx_lookup_des_part1_unc_cor_err_cnt),
4403[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4404 CNTR_NORMAL,
4405 access_rx_lookup_des_part1_unc_err_cnt),
4406[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4407 CNTR_NORMAL,
4408 access_rx_rbuf_next_free_buf_cor_err_cnt),
4409[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4410 CNTR_NORMAL,
4411 access_rx_rbuf_next_free_buf_unc_err_cnt),
4412[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4413 "RxRbufFlInitWrAddrParityErr", 0, 0,
4414 CNTR_NORMAL,
4415 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4416[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4417 0, CNTR_NORMAL,
4418 access_rx_rbuf_fl_initdone_parity_err_cnt),
4419[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4420 0, CNTR_NORMAL,
4421 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4422[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4423 CNTR_NORMAL,
4424 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4425[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4426 CNTR_NORMAL,
4427 access_rx_rbuf_empty_err_cnt),
4428[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4429 CNTR_NORMAL,
4430 access_rx_rbuf_full_err_cnt),
4431[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4432 CNTR_NORMAL,
4433 access_rbuf_bad_lookup_err_cnt),
4434[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4435 CNTR_NORMAL,
4436 access_rbuf_ctx_id_parity_err_cnt),
4437[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4438 CNTR_NORMAL,
4439 access_rbuf_csr_qeopdw_parity_err_cnt),
4440[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4441 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4442 CNTR_NORMAL,
4443 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4444[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4445 "RxRbufCsrQTlPtrParityErr", 0, 0,
4446 CNTR_NORMAL,
4447 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4448[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4449 0, CNTR_NORMAL,
4450 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4451[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4452 0, CNTR_NORMAL,
4453 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4454[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4455 0, 0, CNTR_NORMAL,
4456 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4457[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4458 0, CNTR_NORMAL,
4459 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4460[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4461 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4462 CNTR_NORMAL,
4463 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4464[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4465 0, CNTR_NORMAL,
4466 access_rx_rbuf_block_list_read_cor_err_cnt),
4467[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4468 0, CNTR_NORMAL,
4469 access_rx_rbuf_block_list_read_unc_err_cnt),
4470[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4471 CNTR_NORMAL,
4472 access_rx_rbuf_lookup_des_cor_err_cnt),
4473[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4474 CNTR_NORMAL,
4475 access_rx_rbuf_lookup_des_unc_err_cnt),
4476[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4477 "RxRbufLookupDesRegUncCorErr", 0, 0,
4478 CNTR_NORMAL,
4479 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4480[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4481 CNTR_NORMAL,
4482 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4483[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4484 CNTR_NORMAL,
4485 access_rx_rbuf_free_list_cor_err_cnt),
4486[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4487 CNTR_NORMAL,
4488 access_rx_rbuf_free_list_unc_err_cnt),
4489[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4490 CNTR_NORMAL,
4491 access_rx_rcv_fsm_encoding_err_cnt),
4492[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4493 CNTR_NORMAL,
4494 access_rx_dma_flag_cor_err_cnt),
4495[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4496 CNTR_NORMAL,
4497 access_rx_dma_flag_unc_err_cnt),
4498[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4499 CNTR_NORMAL,
4500 access_rx_dc_sop_eop_parity_err_cnt),
4501[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4502 CNTR_NORMAL,
4503 access_rx_rcv_csr_parity_err_cnt),
4504[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4505 CNTR_NORMAL,
4506 access_rx_rcv_qp_map_table_cor_err_cnt),
4507[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4508 CNTR_NORMAL,
4509 access_rx_rcv_qp_map_table_unc_err_cnt),
4510[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4511 CNTR_NORMAL,
4512 access_rx_rcv_data_cor_err_cnt),
4513[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4514 CNTR_NORMAL,
4515 access_rx_rcv_data_unc_err_cnt),
4516[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4517 CNTR_NORMAL,
4518 access_rx_rcv_hdr_cor_err_cnt),
4519[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4520 CNTR_NORMAL,
4521 access_rx_rcv_hdr_unc_err_cnt),
4522[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4523 CNTR_NORMAL,
4524 access_rx_dc_intf_parity_err_cnt),
4525[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4526 CNTR_NORMAL,
4527 access_rx_dma_csr_cor_err_cnt),
4528/* SendPioErrStatus */
4529[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4530 CNTR_NORMAL,
4531 access_pio_pec_sop_head_parity_err_cnt),
4532[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4533 CNTR_NORMAL,
4534 access_pio_pcc_sop_head_parity_err_cnt),
4535[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4536 0, 0, CNTR_NORMAL,
4537 access_pio_last_returned_cnt_parity_err_cnt),
4538[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4539 0, CNTR_NORMAL,
4540 access_pio_current_free_cnt_parity_err_cnt),
4541[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4542 CNTR_NORMAL,
4543 access_pio_reserved_31_err_cnt),
4544[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4545 CNTR_NORMAL,
4546 access_pio_reserved_30_err_cnt),
4547[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4548 CNTR_NORMAL,
4549 access_pio_ppmc_sop_len_err_cnt),
4550[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4551 CNTR_NORMAL,
4552 access_pio_ppmc_bqc_mem_parity_err_cnt),
4553[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4554 CNTR_NORMAL,
4555 access_pio_vl_fifo_parity_err_cnt),
4556[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4557 CNTR_NORMAL,
4558 access_pio_vlf_sop_parity_err_cnt),
4559[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4560 CNTR_NORMAL,
4561 access_pio_vlf_v1_len_parity_err_cnt),
4562[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4563 CNTR_NORMAL,
4564 access_pio_block_qw_count_parity_err_cnt),
4565[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4566 CNTR_NORMAL,
4567 access_pio_write_qw_valid_parity_err_cnt),
4568[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4569 CNTR_NORMAL,
4570 access_pio_state_machine_err_cnt),
4571[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4572 CNTR_NORMAL,
4573 access_pio_write_data_parity_err_cnt),
4574[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4575 CNTR_NORMAL,
4576 access_pio_host_addr_mem_cor_err_cnt),
4577[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4578 CNTR_NORMAL,
4579 access_pio_host_addr_mem_unc_err_cnt),
4580[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4581 CNTR_NORMAL,
4582 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4583[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4584 CNTR_NORMAL,
4585 access_pio_init_sm_in_err_cnt),
4586[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4587 CNTR_NORMAL,
4588 access_pio_ppmc_pbl_fifo_err_cnt),
4589[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4590 0, CNTR_NORMAL,
4591 access_pio_credit_ret_fifo_parity_err_cnt),
4592[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4593 CNTR_NORMAL,
4594 access_pio_v1_len_mem_bank1_cor_err_cnt),
4595[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4596 CNTR_NORMAL,
4597 access_pio_v1_len_mem_bank0_cor_err_cnt),
4598[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4599 CNTR_NORMAL,
4600 access_pio_v1_len_mem_bank1_unc_err_cnt),
4601[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4602 CNTR_NORMAL,
4603 access_pio_v1_len_mem_bank0_unc_err_cnt),
4604[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4605 CNTR_NORMAL,
4606 access_pio_sm_pkt_reset_parity_err_cnt),
4607[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4608 CNTR_NORMAL,
4609 access_pio_pkt_evict_fifo_parity_err_cnt),
4610[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4611 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4612 CNTR_NORMAL,
4613 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4614[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4615 CNTR_NORMAL,
4616 access_pio_sbrdctl_crrel_parity_err_cnt),
4617[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4618 CNTR_NORMAL,
4619 access_pio_pec_fifo_parity_err_cnt),
4620[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4621 CNTR_NORMAL,
4622 access_pio_pcc_fifo_parity_err_cnt),
4623[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4624 CNTR_NORMAL,
4625 access_pio_sb_mem_fifo1_err_cnt),
4626[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4627 CNTR_NORMAL,
4628 access_pio_sb_mem_fifo0_err_cnt),
4629[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4630 CNTR_NORMAL,
4631 access_pio_csr_parity_err_cnt),
4632[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4633 CNTR_NORMAL,
4634 access_pio_write_addr_parity_err_cnt),
4635[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4636 CNTR_NORMAL,
4637 access_pio_write_bad_ctxt_err_cnt),
4638/* SendDmaErrStatus */
4639[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4640 0, CNTR_NORMAL,
4641 access_sdma_pcie_req_tracking_cor_err_cnt),
4642[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4643 0, CNTR_NORMAL,
4644 access_sdma_pcie_req_tracking_unc_err_cnt),
4645[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4646 CNTR_NORMAL,
4647 access_sdma_csr_parity_err_cnt),
4648[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4649 CNTR_NORMAL,
4650 access_sdma_rpy_tag_err_cnt),
4651/* SendEgressErrStatus */
4652[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4653 CNTR_NORMAL,
4654 access_tx_read_pio_memory_csr_unc_err_cnt),
4655[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4656 0, CNTR_NORMAL,
4657 access_tx_read_sdma_memory_csr_err_cnt),
4658[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4659 CNTR_NORMAL,
4660 access_tx_egress_fifo_cor_err_cnt),
4661[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4662 CNTR_NORMAL,
4663 access_tx_read_pio_memory_cor_err_cnt),
4664[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4665 CNTR_NORMAL,
4666 access_tx_read_sdma_memory_cor_err_cnt),
4667[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4668 CNTR_NORMAL,
4669 access_tx_sb_hdr_cor_err_cnt),
4670[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4671 CNTR_NORMAL,
4672 access_tx_credit_overrun_err_cnt),
4673[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4674 CNTR_NORMAL,
4675 access_tx_launch_fifo8_cor_err_cnt),
4676[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4677 CNTR_NORMAL,
4678 access_tx_launch_fifo7_cor_err_cnt),
4679[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4680 CNTR_NORMAL,
4681 access_tx_launch_fifo6_cor_err_cnt),
4682[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4683 CNTR_NORMAL,
4684 access_tx_launch_fifo5_cor_err_cnt),
4685[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4686 CNTR_NORMAL,
4687 access_tx_launch_fifo4_cor_err_cnt),
4688[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4689 CNTR_NORMAL,
4690 access_tx_launch_fifo3_cor_err_cnt),
4691[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4692 CNTR_NORMAL,
4693 access_tx_launch_fifo2_cor_err_cnt),
4694[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4695 CNTR_NORMAL,
4696 access_tx_launch_fifo1_cor_err_cnt),
4697[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4698 CNTR_NORMAL,
4699 access_tx_launch_fifo0_cor_err_cnt),
4700[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4701 CNTR_NORMAL,
4702 access_tx_credit_return_vl_err_cnt),
4703[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4704 CNTR_NORMAL,
4705 access_tx_hcrc_insertion_err_cnt),
4706[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4707 CNTR_NORMAL,
4708 access_tx_egress_fifo_unc_err_cnt),
4709[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4710 CNTR_NORMAL,
4711 access_tx_read_pio_memory_unc_err_cnt),
4712[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4713 CNTR_NORMAL,
4714 access_tx_read_sdma_memory_unc_err_cnt),
4715[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4716 CNTR_NORMAL,
4717 access_tx_sb_hdr_unc_err_cnt),
4718[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4719 CNTR_NORMAL,
4720 access_tx_credit_return_partiy_err_cnt),
4721[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4722 0, 0, CNTR_NORMAL,
4723 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4724[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4725 0, 0, CNTR_NORMAL,
4726 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4727[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4728 0, 0, CNTR_NORMAL,
4729 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4730[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4731 0, 0, CNTR_NORMAL,
4732 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4733[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4734 0, 0, CNTR_NORMAL,
4735 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4736[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4737 0, 0, CNTR_NORMAL,
4738 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4739[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4740 0, 0, CNTR_NORMAL,
4741 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4742[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4743 0, 0, CNTR_NORMAL,
4744 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4745[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4746 0, 0, CNTR_NORMAL,
4747 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4748[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4749 0, 0, CNTR_NORMAL,
4750 access_tx_sdma15_disallowed_packet_err_cnt),
4751[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4752 0, 0, CNTR_NORMAL,
4753 access_tx_sdma14_disallowed_packet_err_cnt),
4754[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4755 0, 0, CNTR_NORMAL,
4756 access_tx_sdma13_disallowed_packet_err_cnt),
4757[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4758 0, 0, CNTR_NORMAL,
4759 access_tx_sdma12_disallowed_packet_err_cnt),
4760[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4761 0, 0, CNTR_NORMAL,
4762 access_tx_sdma11_disallowed_packet_err_cnt),
4763[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4764 0, 0, CNTR_NORMAL,
4765 access_tx_sdma10_disallowed_packet_err_cnt),
4766[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4767 0, 0, CNTR_NORMAL,
4768 access_tx_sdma9_disallowed_packet_err_cnt),
4769[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4770 0, 0, CNTR_NORMAL,
4771 access_tx_sdma8_disallowed_packet_err_cnt),
4772[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4773 0, 0, CNTR_NORMAL,
4774 access_tx_sdma7_disallowed_packet_err_cnt),
4775[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4776 0, 0, CNTR_NORMAL,
4777 access_tx_sdma6_disallowed_packet_err_cnt),
4778[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4779 0, 0, CNTR_NORMAL,
4780 access_tx_sdma5_disallowed_packet_err_cnt),
4781[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4782 0, 0, CNTR_NORMAL,
4783 access_tx_sdma4_disallowed_packet_err_cnt),
4784[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4785 0, 0, CNTR_NORMAL,
4786 access_tx_sdma3_disallowed_packet_err_cnt),
4787[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4788 0, 0, CNTR_NORMAL,
4789 access_tx_sdma2_disallowed_packet_err_cnt),
4790[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4791 0, 0, CNTR_NORMAL,
4792 access_tx_sdma1_disallowed_packet_err_cnt),
4793[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4794 0, 0, CNTR_NORMAL,
4795 access_tx_sdma0_disallowed_packet_err_cnt),
4796[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4797 CNTR_NORMAL,
4798 access_tx_config_parity_err_cnt),
4799[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4800 CNTR_NORMAL,
4801 access_tx_sbrd_ctl_csr_parity_err_cnt),
4802[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4803 CNTR_NORMAL,
4804 access_tx_launch_csr_parity_err_cnt),
4805[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4806 CNTR_NORMAL,
4807 access_tx_illegal_vl_err_cnt),
4808[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4809 "TxSbrdCtlStateMachineParityErr", 0, 0,
4810 CNTR_NORMAL,
4811 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4812[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4813 CNTR_NORMAL,
4814 access_egress_reserved_10_err_cnt),
4815[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4816 CNTR_NORMAL,
4817 access_egress_reserved_9_err_cnt),
4818[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4819 0, 0, CNTR_NORMAL,
4820 access_tx_sdma_launch_intf_parity_err_cnt),
4821[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4822 CNTR_NORMAL,
4823 access_tx_pio_launch_intf_parity_err_cnt),
4824[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4825 CNTR_NORMAL,
4826 access_egress_reserved_6_err_cnt),
4827[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4828 CNTR_NORMAL,
4829 access_tx_incorrect_link_state_err_cnt),
4830[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4831 CNTR_NORMAL,
4832 access_tx_linkdown_err_cnt),
4833[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4834 "EgressFifoUnderrunOrParityErr", 0, 0,
4835 CNTR_NORMAL,
4836 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4837[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4838 CNTR_NORMAL,
4839 access_egress_reserved_2_err_cnt),
4840[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4841 CNTR_NORMAL,
4842 access_tx_pkt_integrity_mem_unc_err_cnt),
4843[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4844 CNTR_NORMAL,
4845 access_tx_pkt_integrity_mem_cor_err_cnt),
4846/* SendErrStatus */
4847[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4848 CNTR_NORMAL,
4849 access_send_csr_write_bad_addr_err_cnt),
4850[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4851 CNTR_NORMAL,
4852 access_send_csr_read_bad_addr_err_cnt),
4853[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4854 CNTR_NORMAL,
4855 access_send_csr_parity_cnt),
4856/* SendCtxtErrStatus */
4857[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4858 CNTR_NORMAL,
4859 access_pio_write_out_of_bounds_err_cnt),
4860[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4861 CNTR_NORMAL,
4862 access_pio_write_overflow_err_cnt),
4863[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4864 0, 0, CNTR_NORMAL,
4865 access_pio_write_crosses_boundary_err_cnt),
4866[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4867 CNTR_NORMAL,
4868 access_pio_disallowed_packet_err_cnt),
4869[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4870 CNTR_NORMAL,
4871 access_pio_inconsistent_sop_err_cnt),
4872/* SendDmaEngErrStatus */
4873[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4874 0, 0, CNTR_NORMAL,
4875 access_sdma_header_request_fifo_cor_err_cnt),
4876[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4877 CNTR_NORMAL,
4878 access_sdma_header_storage_cor_err_cnt),
4879[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4880 CNTR_NORMAL,
4881 access_sdma_packet_tracking_cor_err_cnt),
4882[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4883 CNTR_NORMAL,
4884 access_sdma_assembly_cor_err_cnt),
4885[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4886 CNTR_NORMAL,
4887 access_sdma_desc_table_cor_err_cnt),
4888[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4889 0, 0, CNTR_NORMAL,
4890 access_sdma_header_request_fifo_unc_err_cnt),
4891[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4892 CNTR_NORMAL,
4893 access_sdma_header_storage_unc_err_cnt),
4894[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4895 CNTR_NORMAL,
4896 access_sdma_packet_tracking_unc_err_cnt),
4897[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4898 CNTR_NORMAL,
4899 access_sdma_assembly_unc_err_cnt),
4900[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4901 CNTR_NORMAL,
4902 access_sdma_desc_table_unc_err_cnt),
4903[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4904 CNTR_NORMAL,
4905 access_sdma_timeout_err_cnt),
4906[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4907 CNTR_NORMAL,
4908 access_sdma_header_length_err_cnt),
4909[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4910 CNTR_NORMAL,
4911 access_sdma_header_address_err_cnt),
4912[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4913 CNTR_NORMAL,
4914 access_sdma_header_select_err_cnt),
4915[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4916 CNTR_NORMAL,
4917 access_sdma_reserved_9_err_cnt),
4918[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4919 CNTR_NORMAL,
4920 access_sdma_packet_desc_overflow_err_cnt),
4921[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4922 CNTR_NORMAL,
4923 access_sdma_length_mismatch_err_cnt),
4924[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4925 CNTR_NORMAL,
4926 access_sdma_halt_err_cnt),
4927[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4928 CNTR_NORMAL,
4929 access_sdma_mem_read_err_cnt),
4930[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4931 CNTR_NORMAL,
4932 access_sdma_first_desc_err_cnt),
4933[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4934 CNTR_NORMAL,
4935 access_sdma_tail_out_of_bounds_err_cnt),
4936[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4937 CNTR_NORMAL,
4938 access_sdma_too_long_err_cnt),
4939[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4940 CNTR_NORMAL,
4941 access_sdma_gen_mismatch_err_cnt),
4942[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4943 CNTR_NORMAL,
4944 access_sdma_wrong_dw_err_cnt),
};

4947static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4948[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4949 CNTR_NORMAL),
4950[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4951 CNTR_NORMAL),
4952[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4953 CNTR_NORMAL),
4954[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4955 CNTR_NORMAL),
4956[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4957 CNTR_NORMAL),
4958[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4959 CNTR_NORMAL),
4960[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4961 CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
			access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
			CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
			access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
			access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
			access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5007[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5008[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5009[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5010[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5011[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5012[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5013[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5014[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5015[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5016[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5017[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5018[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5019[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5020[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5021[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5022[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5023[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5024[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5025[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5026[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5027[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5028[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5029[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5030[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5031[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5032[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5033[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5034[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5035[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5036[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5037[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5038[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5039[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5040[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5041[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5042[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5043[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5044[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5045[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5046[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5047[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5048[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5049[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5050[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5051[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5052[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5053[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5054[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5055[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5056[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5057[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5058[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5059[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5060[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5061[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5062[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5063[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5064[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5065[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5066[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5067[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5068[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5069[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5070[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5071[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5072[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5073[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5074[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5075[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5076[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5077[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5078[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5079[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5080[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5081[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5082[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5083[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5084[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5085[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5086};
5087
/* ======================================================================== */

/* return true if this is chip revision a */
int is_ax(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0;
}

/* return true if this is chip revision b */
int is_bx(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xF0) == 0x10;
}

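/*
 * Illustrative note (not upstream text): with the decode above, a
 * CCE_REVISION minor field of 0x00-0x0f identifies a revision A part
 * (is_ax() returns true) and 0x10-0x1f a revision B part (is_bx()
 * returns true). For example, chip_rev_minor == 0x12 gives
 * is_ax() == 0 and is_bx() == 1.
 */
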
/*
 * Append string s to buffer buf. Arguments curp and len are the current
 * position and remaining length, respectively.
 *
 * return 0 on success, 1 on out of room
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
	char *p = *curp;
	int len = *lenp;
	int result = 0;	/* success */
	char c;

	/* add a comma, if not first in the buffer */
	if (p != buf) {
		if (len == 0) {
			result = 1;	/* out of room */
			goto done;
		}
		*p++ = ',';
		len--;
	}

	/* copy the string */
	while ((c = *s++) != 0) {
		if (len == 0) {
			result = 1;	/* out of room */
			goto done;
		}
		*p++ = c;
		len--;
	}

done:
	/* write return values */
	*curp = p;
	*lenp = len;

	return result;
}

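/*
 * Illustrative sketch (not upstream code) of how append_str() is meant
 * to be driven; the buffer name and sizes below are made up:
 *
 *	char buf[8], *cur = buf;
 *	int left = sizeof(buf) - 1;	(reserve room for the final nul)
 *
 *	append_str(buf, &cur, &left, "a");	(buf holds "a",   left == 6)
 *	append_str(buf, &cur, &left, "b");	(buf holds "a,b", left == 4)
 *	*cur = 0;
 */
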
/*
 * Using the given flag table, print a comma separated string into
 * the buffer. End in '*' if the buffer is too short.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
			 struct flag_table *table, int table_size)
{
	char extra[32];
	char *p = buf;
	int len = buf_len;
	int no_room = 0;
	int i;

	/* make sure there are at least 2 bytes so we can form "*" */
	if (len < 2)
		return "";

	len--;	/* leave room for a nul */
	for (i = 0; i < table_size; i++) {
		if (flags & table[i].flag) {
			no_room = append_str(buf, &p, &len, table[i].str);
			if (no_room)
				break;
			flags &= ~table[i].flag;
		}
	}

	/* any undocumented bits left? */
	if (!no_room && flags) {
		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
		no_room = append_str(buf, &p, &len, extra);
	}

	/* add * if ran out of room */
	if (no_room) {
		/* may need to back up to add space for a '*' */
		if (len == 0)
			--p;
		*p++ = '*';
	}

	/* add final nul - space already allocated above */
	*p = 0;
	return buf;
}

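/*
 * Illustrative example (hypothetical table, not upstream code): given
 *
 *	struct flag_table t[] = { { .flag = 0x1, .str = "ErrA" },
 *				  { .flag = 0x2, .str = "ErrB" } };
 *
 * the call flag_string(buf, sizeof(buf), 0x7, t, 2) with a large enough
 * buffer produces "ErrA,ErrB,bits 0x4"; with a buffer that is too small
 * the output is truncated and ends in '*'.
 */
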
5195/* first 8 CCE error interrupt source names */
5196static const char * const cce_misc_names[] = {
5197 "CceErrInt", /* 0 */
5198 "RxeErrInt", /* 1 */
5199 "MiscErrInt", /* 2 */
5200 "Reserved3", /* 3 */
5201 "PioErrInt", /* 4 */
5202 "SDmaErrInt", /* 5 */
5203 "EgressErrInt", /* 6 */
5204 "TxeErrInt" /* 7 */
5205};
5206
/*
 * Return the miscellaneous error interrupt name.
 */
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(cce_misc_names))
		strncpy(buf, cce_misc_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u",
			 source + IS_GENERAL_ERR_START);

	return buf;
}
5220
5221/*
5222 * Return the SDMA engine error interrupt name.
5223 */
5224static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5225{
5226 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5227 return buf;
5228}
5229
5230/*
5231 * Return the send context error interrupt name.
5232 */
5233static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5234{
5235 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5236 return buf;
5237}
5238
5239static const char * const various_names[] = {
5240 "PbcInt",
5241 "GpioAssertInt",
5242 "Qsfp1Int",
5243 "Qsfp2Int",
5244 "TCritInt"
5245};
5246
/*
 * Return the various interrupt name.
 */
static char *is_various_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(various_names))
		strncpy(buf, various_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
	return buf;
}
5258
5259/*
5260 * Return the DC interrupt name.
5261 */
5262static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5263{
5264 static const char * const dc_int_names[] = {
5265 "common",
5266 "lcb",
5267 "8051",
5268 "lbm" /* local block merge */
5269 };
5270
5271 if (source < ARRAY_SIZE(dc_int_names))
5272 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5273 else
5274 snprintf(buf, bsize, "DCInt%u", source);
5275 return buf;
5276}
5277
5278static const char * const sdma_int_names[] = {
5279 "SDmaInt",
5280 "SdmaIdleInt",
5281 "SdmaProgressInt",
5282};
5283
5284/*
5285 * Return the SDMA engine interrupt name.
5286 */
5287static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5288{
5289 /* what interrupt */
5290 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5291 /* which engine */
5292 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5293
5294 if (likely(what < 3))
5295 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5296 else
5297 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5298 return buf;
5299}
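
/*
 * Illustrative note (not upstream text, and assuming TXE_NUM_SDMA_ENGINES
 * is 16): source 0 decodes to what = 0, which = 0 and yields "SDmaInt0",
 * while source 17 decodes to what = 1, which = 1 and yields "SdmaIdleInt1".
 */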
5300
5301/*
5302 * Return the receive available interrupt name.
5303 */
5304static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5305{
5306 snprintf(buf, bsize, "RcvAvailInt%u", source);
5307 return buf;
5308}
5309
5310/*
5311 * Return the receive urgent interrupt name.
5312 */
5313static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5314{
5315 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5316 return buf;
5317}
5318
5319/*
5320 * Return the send credit interrupt name.
5321 */
5322static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5323{
5324 snprintf(buf, bsize, "SendCreditInt%u", source);
5325 return buf;
5326}
5327
5328/*
5329 * Return the reserved interrupt name.
5330 */
5331static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5332{
5333 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5334 return buf;
5335}
5336
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   cce_err_status_flags,
			   ARRAY_SIZE(cce_err_status_flags));
}

static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   rxe_err_status_flags,
			   ARRAY_SIZE(rxe_err_status_flags));
}

static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, misc_err_status_flags,
			   ARRAY_SIZE(misc_err_status_flags));
}

static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   pio_err_status_flags,
			   ARRAY_SIZE(pio_err_status_flags));
}

static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sdma_err_status_flags,
			   ARRAY_SIZE(sdma_err_status_flags));
}

static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_status_flags,
			   ARRAY_SIZE(egress_err_status_flags));
}

static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_info_flags,
			   ARRAY_SIZE(egress_err_info_flags));
}

static char *send_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   send_err_status_flags,
			   ARRAY_SIZE(send_err_status_flags));
}
5391
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	/*
	 * For most of these errors, there is nothing that can be done except
	 * report or record it.
	 */
	dd_dev_info(dd, "CCE Error: %s\n",
		    cce_err_status_string(buf, sizeof(buf), reg));

	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
		/* this error requires a manual drop into SPC freeze mode */
		/* then a fix up */
		start_freeze_handling(dd->pport, FREEZE_SELF);
	}

	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i)) {
			incr_cntr64(&dd->cce_err_status_cnt[i]);
			/* maintain a counter over all cce_err_status errors */
			incr_cntr64(&dd->sw_cce_err_status_aggregate);
		}
	}
}
5419
/*
 * Check counters for receive errors that do not have an interrupt
 * associated with them.
 */
#define RCVERR_CHECK_TIME 10
static void update_rcverr_timer(unsigned long opaque)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
	struct hfi1_pportdata *ppd = dd->pport;
	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);

	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(
			ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
	}
	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;

	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static int init_rcverr(struct hfi1_devdata *dd)
{
	setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
	/* Assume the hardware counter has been reset */
	dd->rcv_ovfl_cnt = 0;
	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static void free_rcverr(struct hfi1_devdata *dd)
{
	if (dd->rcverr_timer.data)
		del_timer_sync(&dd->rcverr_timer);
	dd->rcverr_timer.data = 0;
}
5458
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Receive Error: %s\n",
		    rxe_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_RXE_FREEZE_ERR) {
		int flags = 0;

		/*
		 * Freeze mode recovery is disabled for the errors
		 * in RXE_FREEZE_ABORT_MASK
		 */
		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
			flags = FREEZE_ABORT;

		start_freeze_handling(dd->pport, flags);
	}

	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->rcv_err_status_cnt[i]);
	}
}
5485
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Misc Error: %s",
		    misc_err_status_string(buf, sizeof(buf), reg));
	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->misc_err_status_cnt[i]);
	}
}

static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "PIO Error: %s\n",
		    pio_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_PIO_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
	}
}

static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "SDMA Error: %s\n",
		    sdma_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_SDMA_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
	}
}
5532
static inline void __count_port_discards(struct hfi1_pportdata *ppd)
{
	incr_cntr64(&ppd->port_xmit_discards);
}
5537
static void count_port_inactive(struct hfi1_devdata *dd)
{
	__count_port_discards(dd->pport);
}
5542
5543/*
5544 * We have had a "disallowed packet" error during egress. Determine the
5545 * integrity check which failed, and update relevant error counter, etc.
5546 *
5547 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5548 * bit of state per integrity check, and so we can miss the reason for an
5549 * egress error if more than one packet fails the same integrity check
5550 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5551 */
static void handle_send_egress_err_info(struct hfi1_devdata *dd,
					int vl)
{
5555 struct hfi1_pportdata *ppd = dd->pport;
5556 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5557 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5558 char buf[96];
5559
5560 /* clear down all observed info as quickly as possible after read */
5561 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5562
	dd_dev_info(dd,
		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
		    info, egress_err_info_string(buf, sizeof(buf), info), src);

	/* Eventually add other counters for each bit */
	if (info & PORT_DISCARD_EGRESS_ERRS) {
		int weight, i;

		/*
		 * Count all applicable bits as individual errors and
5573 * attribute them to the packet that triggered this handler.
5574 * This may not be completely accurate due to limitations
5575 * on the available hardware error information. There is
5576 * a single information register and any number of error
5577 * packets may have occurred and contributed to it before
5578 * this routine is called. This means that:
5579 * a) If multiple packets with the same error occur before
5580 * this routine is called, earlier packets are missed.
5581 * There is only a single bit for each error type.
5582 * b) Errors may not be attributed to the correct VL.
5583 * The driver is attributing all bits in the info register
5584 * to the packet that triggered this call, but bits
5585 * could be an accumulation of different packets with
5586 * different VLs.
5587 * c) A single error packet may have multiple counts attached
5588 * to it. There is no way for the driver to know if
5589 * multiple bits set in the info register are due to a
5590 * single packet or multiple packets. The driver assumes
5591 * multiple packets.
		 */
		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
		for (i = 0; i < weight; i++) {
			__count_port_discards(ppd);
			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
			else if (vl == 15)
				incr_cntr64(&ppd->port_xmit_discards_vl
					    [C_VL_15]);
		}
	}
5603}
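
/*
 * Illustrative note (not upstream text): if the info register read in
 * handle_send_egress_err_info() had three of the PORT_DISCARD_EGRESS_ERRS
 * bits set, hweight64() evaluates to 3 and the loop above counts three
 * discards, all attributed to the single VL passed in, subject to the
 * caveats spelled out in the block comment above.
 */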
5604
5605/*
5606 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5607 * register. Does it represent a 'port inactive' error?
5608 */
5609static inline int port_inactive_err(u64 posn)
5610{
5611 return (posn >= SEES(TX_LINKDOWN) &&
5612 posn <= SEES(TX_INCORRECT_LINK_STATE));
5613}
5614
5615/*
5616 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5617 * register. Does it represent a 'disallowed packet' error?
5618 */
static inline int disallowed_pkt_err(int posn)
{
	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
}
5624
/*
 * Input value is a bit position of one of the SDMA engine disallowed
 * packet errors. Return which engine. Use of this must be guarded by
 * disallowed_pkt_err().
 */
static inline int disallowed_pkt_engine(int posn)
{
	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
}
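
/*
 * Illustrative note (not upstream text): the disallowed-packet bits are
 * assumed contiguous, so a bit position of SEES(TX_SDMA3_DISALLOWED_PACKET)
 * decodes to engine 3, i.e. that position minus
 * SEES(TX_SDMA0_DISALLOWED_PACKET).
 */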
5634
/*
 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
 * be done.
 */
5639static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5640{
5641 struct sdma_vl_map *m;
5642 int vl;
5643
5644 /* range check */
5645 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5646 return -1;
5647
5648 rcu_read_lock();
5649 m = rcu_dereference(dd->sdma_map);
5650 vl = m->engine_to_vl[engine];
5651 rcu_read_unlock();
5652
5653 return vl;
5654}
5655
/*
 * Translate the send context (software index) into a VL. Return -1 if the
 * translation cannot be done.
 */
5660static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5661{
5662 struct send_context_info *sci;
5663 struct send_context *sc;
5664 int i;
5665
5666 sci = &dd->send_contexts[sw_index];
5667
5668 /* there is no information for user (PSM) and ack contexts */
	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
		return -1;
5671
5672 sc = sci->sc;
5673 if (!sc)
5674 return -1;
5675 if (dd->vld[15].sc == sc)
5676 return 15;
5677 for (i = 0; i < num_vls; i++)
5678 if (dd->vld[i].sc == sc)
5679 return i;
5680
5681 return -1;
5682}
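
/*
 * Illustrative note (not upstream text): these two translators feed the
 * egress error paths below. A disallowed-packet bit for SDMA engine 3 is
 * mapped to a VL via engine_to_vl(dd, 3); a PIO disallowed-packet error on
 * a send context is mapped via sc_to_vl(dd, sw_index). Either may return
 * -1, in which case only the aggregate port discard counter is bumped and
 * no per-VL counter is touched.
 */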
5683
Mike Marciniszyn77241052015-07-30 15:17:43 -04005684static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5685{
5686 u64 reg_copy = reg, handled = 0;
5687 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005688 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005689
5690 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5691 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005692 else if (is_ax(dd) &&
5693 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5694 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005695 start_freeze_handling(dd->pport, 0);
5696
5697 while (reg_copy) {
5698 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005699 /* fls64() returns a 1-based offset, we want it zero based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005700 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005701 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005702
5703 if (port_inactive_err(shift)) {
5704 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005705 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005706 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005707 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5708
5709 handle_send_egress_err_info(dd, vl);
5710 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005711 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005712 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005713 }
5714
5715 reg &= ~handled;
5716
5717 if (reg)
5718 dd_dev_info(dd, "Egress Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005719 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005720
5721 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5722 if (reg & (1ull << i))
5723 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5724 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005725}
5726
5727static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5728{
5729 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005730 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005731
5732 dd_dev_info(dd, "Send Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005733 send_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005734
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005735 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5736 if (reg & (1ull << i))
5737 incr_cntr64(&dd->send_err_status_cnt[i]);
5738 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005739}
5740
5741/*
5742 * The maximum number of times the error clear down will loop before
5743 * blocking a repeating error. This value is arbitrary.
5744 */
5745#define MAX_CLEAR_COUNT 20
5746
5747/*
5748 * Clear and handle an error register. All error interrupts are funneled
5749 * through here to have a central location to correctly handle single-
5750 * or multi-shot errors.
5751 *
5752 * For non per-context registers, call this routine with a context value
5753 * of 0 so the per-context offset is zero.
5754 *
5755 * If the handler loops too many times, assume that something is wrong
5756 * and can't be fixed, so mask the error bits.
5757 */
5758static void interrupt_clear_down(struct hfi1_devdata *dd,
5759 u32 context,
5760 const struct err_reg_info *eri)
5761{
5762 u64 reg;
5763 u32 count;
5764
5765 /* read in a loop until no more errors are seen */
5766 count = 0;
5767 while (1) {
5768 reg = read_kctxt_csr(dd, context, eri->status);
5769 if (reg == 0)
5770 break;
5771 write_kctxt_csr(dd, context, eri->clear, reg);
5772 if (likely(eri->handler))
5773 eri->handler(dd, context, reg);
5774 count++;
5775 if (count > MAX_CLEAR_COUNT) {
5776 u64 mask;
5777
			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
				   eri->desc, reg);
			/*
5781 * Read-modify-write so any other masked bits
5782 * remain masked.
5783 */
5784 mask = read_kctxt_csr(dd, context, eri->mask);
5785 mask &= ~reg;
5786 write_kctxt_csr(dd, context, eri->mask, mask);
5787 break;
5788 }
5789 }
5790}
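
/*
 * Illustrative note (not upstream text): the read-modify-write above can
 * only clear bits in the mask CSR. If one error bit was masked by an
 * earlier pass and a different bit is now repeating, mask &= ~reg leaves
 * the first bit masked and additionally masks the new repeat offender, so
 * earlier masking decisions are never undone here.
 */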
5791
5792/*
5793 * CCE block "misc" interrupt. Source is < 16.
5794 */
5795static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5796{
5797 const struct err_reg_info *eri = &misc_errs[source];
5798
5799 if (eri->handler) {
5800 interrupt_clear_down(dd, 0, eri);
5801 } else {
5802 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005803 source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005804 }
5805}
5806
5807static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5808{
5809 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005810 sc_err_status_flags,
5811 ARRAY_SIZE(sc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005812}
5813
5814/*
5815 * Send context error interrupt. Source (hw_context) is < 160.
5816 *
5817 * All send context errors cause the send context to halt. The normal
5818 * clear-down mechanism cannot be used because we cannot clear the
5819 * error bits until several other long-running items are done first.
5820 * This is OK because with the context halted, nothing else is going
5821 * to happen on it anyway.
5822 */
5823static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5824 unsigned int hw_context)
5825{
5826 struct send_context_info *sci;
5827 struct send_context *sc;
5828 char flags[96];
5829 u64 status;
5830 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005831 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005832
5833 sw_index = dd->hw_to_sw[hw_context];
5834 if (sw_index >= dd->num_send_contexts) {
5835 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005836 "out of range sw index %u for send context %u\n",
5837 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005838 return;
5839 }
5840 sci = &dd->send_contexts[sw_index];
5841 sc = sci->sc;
5842 if (!sc) {
5843 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -08005844 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005845 return;
5846 }
5847
5848 /* tell the software that a halt has begun */
5849 sc_stop(sc, SCF_HALTED);
5850
5851 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5852
5853 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
Jubin John17fb4f22016-02-14 20:21:52 -08005854 send_context_err_status_string(flags, sizeof(flags),
5855 status));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005856
5857 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005858 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005859
5860 /*
5861 * Automatically restart halted kernel contexts out of interrupt
5862 * context. User contexts must ask the driver to restart the context.
5863 */
5864 if (sc->type != SC_USER)
5865 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005866
5867 /*
5868 * Update the counters for the corresponding status bits.
5869 * Note that these particular counters are aggregated over all
5870 * 160 contexts.
5871 */
5872 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5873 if (status & (1ull << i))
5874 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5875 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005876}
5877
5878static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5879 unsigned int source, u64 status)
5880{
5881 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005882 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005883
5884 sde = &dd->per_sdma[source];
5885#ifdef CONFIG_SDMA_VERBOSITY
5886 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5887 slashstrip(__FILE__), __LINE__, __func__);
5888 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5889 sde->this_idx, source, (unsigned long long)status);
5890#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005891 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005892 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005893
5894 /*
5895 * Update the counters for the corresponding status bits.
5896 * Note that these particular counters are aggregated over
5897 * all 16 DMA engines.
5898 */
5899 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5900 if (status & (1ull << i))
5901 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5902 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005903}
5904
5905/*
5906 * CCE block SDMA error interrupt. Source is < 16.
5907 */
5908static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5909{
5910#ifdef CONFIG_SDMA_VERBOSITY
5911 struct sdma_engine *sde = &dd->per_sdma[source];
5912
5913 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5914 slashstrip(__FILE__), __LINE__, __func__);
5915 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5916 source);
5917 sdma_dumpstate(sde);
5918#endif
5919 interrupt_clear_down(dd, source, &sdma_eng_err);
5920}
5921
5922/*
5923 * CCE block "various" interrupt. Source is < 8.
5924 */
5925static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5926{
5927 const struct err_reg_info *eri = &various_err[source];
5928
5929 /*
5930 * TCritInt cannot go through interrupt_clear_down()
5931 * because it is not a second tier interrupt. The handler
5932 * should be called directly.
5933 */
5934 if (source == TCRIT_INT_SOURCE)
5935 handle_temp_err(dd);
5936 else if (eri->handler)
5937 interrupt_clear_down(dd, 0, eri);
5938 else
5939 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005940 "%s: Unimplemented/reserved interrupt %d\n",
5941 __func__, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005942}
5943
5944static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5945{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005946 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005947 struct hfi1_pportdata *ppd = dd->pport;
5948 unsigned long flags;
5949 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5950
5951 if (reg & QSFP_HFI0_MODPRST_N) {
		if (!qsfp_mod_present(ppd)) {
			dd_dev_info(dd, "%s: QSFP module removed\n",
				    __func__);

Mike Marciniszyn77241052015-07-30 15:17:43 -04005956 ppd->driver_link_ready = 0;
5957 /*
5958 * Cable removed, reset all our information about the
5959 * cache and cable capabilities
5960 */
5961
5962 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5963 /*
5964 * We don't set cache_refresh_required here as we expect
5965 * an interrupt when a cable is inserted
5966 */
5967 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005968 ppd->qsfp_info.reset_needed = 0;
5969 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005970 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08005971 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005972 /* Invert the ModPresent pin now to detect plug-in */
5973 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5974 ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			if ((ppd->offline_disabled_reason >
			    HFI1_ODR_MASK(
			    OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
			    (ppd->offline_disabled_reason ==
			    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
				ppd->offline_disabled_reason =
					HFI1_ODR_MASK(
					OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);

Mike Marciniszyn77241052015-07-30 15:17:43 -04005985 if (ppd->host_link_state == HLS_DN_POLL) {
5986 /*
5987 * The link is still in POLL. This means
5988 * that the normal link down processing
5989 * will not happen. We have to do it here
5990 * before turning the DC off.
5991 */
5992 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5993 }
5994 } else {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08005995 dd_dev_info(dd, "%s: QSFP module inserted\n",
5996 __func__);
5997
Mike Marciniszyn77241052015-07-30 15:17:43 -04005998 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5999 ppd->qsfp_info.cache_valid = 0;
6000 ppd->qsfp_info.cache_refresh_required = 1;
6001 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006002 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006003
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006004 /*
6005 * Stop inversion of ModPresent pin to detect
6006 * removal of the cable
6007 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006008 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006009 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6010 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6011
6012 ppd->offline_disabled_reason =
6013 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006014 }
6015 }
6016
6017 if (reg & QSFP_HFI0_INT_N) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006018 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006019 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006020 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6021 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006022 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6023 }
6024
6025 /* Schedule the QSFP work only if there is a cable attached. */
6026 if (qsfp_mod_present(ppd))
6027 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6028}
6029
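/*
 * Ask the 8051 to hand LCB CSR access to the host.
 * Returns 0 on success, -EBUSY otherwise.
 */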
6030static int request_host_lcb_access(struct hfi1_devdata *dd)
6031{
6032 int ret;
6033
6034 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006035 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6036 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006037 if (ret != HCMD_SUCCESS) {
6038 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006039 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006040 }
6041 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6042}
6043
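/*
 * Inform the 8051 that it has been granted LCB CSR access again.
 * Returns 0 on success, -EBUSY otherwise.
 */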
6044static int request_8051_lcb_access(struct hfi1_devdata *dd)
6045{
6046 int ret;
6047
6048 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006049 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6050 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006051 if (ret != HCMD_SUCCESS) {
6052 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006053 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006054 }
6055 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6056}
6057
6058/*
6059 * Set the LCB selector - allow host access. The DCC selector always
6060 * points to the host.
6061 */
6062static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6063{
6064 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006065 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6066 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006067}
6068
6069/*
6070 * Clear the LCB selector - allow 8051 access. The DCC selector always
6071 * points to the host.
6072 */
6073static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6074{
6075 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006076 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006077}
6078
6079/*
6080 * Acquire LCB access from the 8051. If the host already has access,
6081 * just increment a counter. Otherwise, inform the 8051 that the
6082 * host is taking access.
6083 *
6084 * Returns:
6085 * 0 on success
6086 * -EBUSY if the 8051 has control and cannot be disturbed
6087 * -errno if unable to acquire access from the 8051
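 *
 * A minimal usage sketch (illustrative only, not taken from a specific
 * caller): bracket any direct LCB CSR access with this pair, e.g.
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}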
6088 */
6089int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6090{
6091 struct hfi1_pportdata *ppd = dd->pport;
6092 int ret = 0;
6093
6094 /*
6095 * Use the host link state lock so the operation of this routine
6096 * { link state check, selector change, count increment } can occur
6097 * as a unit against a link state change. Otherwise there is a
6098 * race between the state change and the count increment.
6099 */
6100 if (sleep_ok) {
6101 mutex_lock(&ppd->hls_lock);
6102 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006103 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006104 udelay(1);
6105 }
6106
6107 /* this access is valid only when the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07006108 if (ppd->host_link_state & HLS_DOWN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006109 dd_dev_info(dd, "%s: link state %s not up\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006110 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006111 ret = -EBUSY;
6112 goto done;
6113 }
6114
6115 if (dd->lcb_access_count == 0) {
6116 ret = request_host_lcb_access(dd);
6117 if (ret) {
6118 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006119 "%s: unable to acquire LCB access, err %d\n",
6120 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006121 goto done;
6122 }
6123 set_host_lcb_access(dd);
6124 }
6125 dd->lcb_access_count++;
6126done:
6127 mutex_unlock(&ppd->hls_lock);
6128 return ret;
6129}
6130
6131/*
6132 * Release LCB access by decrementing the use count. If the count is moving
6133 * from 1 to 0, inform the 8051 that it has control back.
6134 *
6135 * Returns:
6136 * 0 on success
6137 * -errno if unable to release access to the 8051
6138 */
6139int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6140{
6141 int ret = 0;
6142
6143 /*
6144 * Use the host link state lock because the acquire needed it.
6145 * Here, we only need to keep { selector change, count decrement }
6146 * as a unit.
6147 */
6148 if (sleep_ok) {
6149 mutex_lock(&dd->pport->hls_lock);
6150 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006151 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006152 udelay(1);
6153 }
6154
6155 if (dd->lcb_access_count == 0) {
6156 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006157 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006158 goto done;
6159 }
6160
6161 if (dd->lcb_access_count == 1) {
6162 set_8051_lcb_access(dd);
6163 ret = request_8051_lcb_access(dd);
6164 if (ret) {
6165 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006166 "%s: unable to release LCB access, err %d\n",
6167 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006168 /* restore host access if the grant didn't work */
6169 set_host_lcb_access(dd);
6170 goto done;
6171 }
6172 }
6173 dd->lcb_access_count--;
6174done:
6175 mutex_unlock(&dd->pport->hls_lock);
6176 return ret;
6177}
6178
6179/*
6180 * Initialize LCB access variables and state. Called during driver load,
6181 * after most of the initialization is finished.
6182 *
6183 * The DC default is LCB access on for the host. The driver defaults to
6184 * leaving access to the 8051. Assign access now - this constrains the call
6185 * to this routine to be after all LCB set-up is done. In particular, after
6186 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6187 */
6188static void init_lcb_access(struct hfi1_devdata *dd)
6189{
6190 dd->lcb_access_count = 0;
6191}
6192
6193/*
6194 * Write a response back to a 8051 request.
6195 */
6196static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6197{
6198 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
Jubin John17fb4f22016-02-14 20:21:52 -08006199 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6200 (u64)return_code <<
6201 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6202 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006203}
6204
6205/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006206 * Handle host requests from the 8051.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006207 */
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006208static void handle_8051_request(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006209{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006210 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006211 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006212 u16 data = 0;
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006213 u8 type;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006214
6215 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6216 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6217 return; /* no request */
6218
6219 /* zero out COMPLETED so the response is seen */
6220 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6221
6222 /* extract request details */
6223 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6224 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6225 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6226 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6227
6228 switch (type) {
6229 case HREQ_LOAD_CONFIG:
6230 case HREQ_SAVE_CONFIG:
6231 case HREQ_READ_CONFIG:
6232 case HREQ_SET_TX_EQ_ABS:
6233 case HREQ_SET_TX_EQ_REL:
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006234 case HREQ_ENABLE:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006235 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006236 type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006237 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6238 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006239 case HREQ_CONFIG_DONE:
6240 hreq_response(dd, HREQ_SUCCESS, 0);
6241 break;
6242
6243 case HREQ_INTERFACE_TEST:
6244 hreq_response(dd, HREQ_SUCCESS, data);
6245 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006246 default:
6247 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6248 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6249 break;
6250 }
6251}
6252
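/*
 * Program SEND_CM_GLOBAL_CREDIT with the allocation unit (vau), total
 * credit limit, and shared credit limit.
 */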
6253static void write_global_credit(struct hfi1_devdata *dd,
6254 u8 vau, u16 total, u16 shared)
6255{
6256 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
Jubin John17fb4f22016-02-14 20:21:52 -08006257 ((u64)total <<
6258 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6259 ((u64)shared <<
6260 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6261 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006262}
6263
6264/*
6265 * Set up initial VL15 credits of the remote. Assumes the rest of
6266 * the CM credit registers are zero from a previous global or credit reset.
6267 */
6268void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6269{
6270 /* leave shared count at zero for both global and VL15 */
6271 write_global_credit(dd, vau, vl15buf, 0);
6272
6273 /* We may need some credits for another VL when sending packets
6274 * with the snoop interface. Dividing it down the middle for VL15
6275 * and VL0 should suffice.
6276 */
6277 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6278 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6279 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6280 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6281 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6282 } else {
6283 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6284 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6285 }
6286}
6287
6288/*
6289 * Zero all credit details from the previous connection and
6290 * reset the CM manager's internal counters.
6291 */
6292void reset_link_credits(struct hfi1_devdata *dd)
6293{
6294 int i;
6295
6296 /* remove all previous VL credit limits */
6297 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006298 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006299 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6300 write_global_credit(dd, 0, 0, 0);
6301 /* reset the CM block */
6302 pio_send_control(dd, PSC_CM_RESET);
6303}
6304
6305/* convert a vCU to a CU */
6306static u32 vcu_to_cu(u8 vcu)
6307{
6308 return 1 << vcu;
6309}
6310
6311/* convert a CU to a vCU */
6312static u8 cu_to_vcu(u32 cu)
6313{
6314 return ilog2(cu);
6315}
6316
6317/* convert a vAU to an AU */
6318static u32 vau_to_au(u8 vau)
6319{
6320 return 8 * (1 << vau);
6321}
6322
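/* (re)set the SMA trap QP and SA QP to their link up defaults */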
6323static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6324{
6325 ppd->sm_trap_qp = 0x0;
6326 ppd->sa_qp = 0x1;
6327}
6328
6329/*
6330 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6331 */
6332static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6333{
6334 u64 reg;
6335
6336 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6337 write_csr(dd, DC_LCB_CFG_RUN, 0);
6338 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6339 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
Jubin John17fb4f22016-02-14 20:21:52 -08006340 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006341 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6342 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6343 reg = read_csr(dd, DCC_CFG_RESET);
Jubin John17fb4f22016-02-14 20:21:52 -08006344 write_csr(dd, DCC_CFG_RESET, reg |
6345 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6346 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006347 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006348 if (!abort) {
6349 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6350 write_csr(dd, DCC_CFG_RESET, reg);
6351 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6352 }
6353}
6354
6355/*
6356 * This routine should be called after the link has been transitioned to
6357 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6358 * reset).
6359 *
6360 * The expectation is that the caller of this routine would have taken
6361 * care of properly transitioning the link into the correct state.
6362 */
6363static void dc_shutdown(struct hfi1_devdata *dd)
6364{
6365 unsigned long flags;
6366
6367 spin_lock_irqsave(&dd->dc8051_lock, flags);
6368 if (dd->dc_shutdown) {
6369 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6370 return;
6371 }
6372 dd->dc_shutdown = 1;
6373 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6374 /* Shutdown the LCB */
6375 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006376 /*
6377 * Going to OFFLINE would have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006378 * SerDes into reset already. Just need to shut down the 8051
Jubin John4d114fd2016-02-14 20:21:43 -08006379 * itself.
6380 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006381 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6382}
6383
Jubin John4d114fd2016-02-14 20:21:43 -08006384/*
6385 * Calling this after the DC has been brought out of reset should not
6386 * do any damage.
6387 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006388static void dc_start(struct hfi1_devdata *dd)
6389{
6390 unsigned long flags;
6391 int ret;
6392
6393 spin_lock_irqsave(&dd->dc8051_lock, flags);
6394 if (!dd->dc_shutdown)
6395 goto done;
6396 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6397 /* Take the 8051 out of reset */
6398 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6399 /* Wait until 8051 is ready */
6400 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6401 if (ret) {
6402 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006403 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006404 }
6405 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6406 write_csr(dd, DCC_CFG_RESET, 0x10);
6407 /* lcb_shutdown() with abort=1 does not restore these */
6408 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6409 spin_lock_irqsave(&dd->dc8051_lock, flags);
6410 dd->dc_shutdown = 0;
6411done:
6412 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6413}
6414
6415/*
6416 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6417 */
6418static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6419{
6420 u64 rx_radr, tx_radr;
6421 u32 version;
6422
6423 if (dd->icode != ICODE_FPGA_EMULATION)
6424 return;
6425
6426 /*
6427 * These LCB defaults on emulator _s are good, nothing to do here:
6428 * LCB_CFG_TX_FIFOS_RADR
6429 * LCB_CFG_RX_FIFOS_RADR
6430 * LCB_CFG_LN_DCLK
6431 * LCB_CFG_IGNORE_LOST_RCLK
6432 */
6433 if (is_emulator_s(dd))
6434 return;
6435 /* else this is _p */
6436
6437 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006438 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006439 version = 0x2d; /* all B0 use 0x2d or higher settings */
6440
6441 if (version <= 0x12) {
6442 /* release 0x12 and below */
6443
6444 /*
6445 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6446 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6447 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6448 */
6449 rx_radr =
6450 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6451 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6452 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6453 /*
6454 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6455 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6456 */
6457 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6458 } else if (version <= 0x18) {
6459 /* release 0x13 up to 0x18 */
6460 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6461 rx_radr =
6462 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6463 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6464 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6465 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6466 } else if (version == 0x19) {
6467 /* release 0x19 */
6468 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6469 rx_radr =
6470 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6471 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6472 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6473 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6474 } else if (version == 0x1a) {
6475 /* release 0x1a */
6476 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6477 rx_radr =
6478 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6479 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6480 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6481 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6482 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6483 } else {
6484 /* release 0x1b and higher */
6485 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6486 rx_radr =
6487 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6488 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6489 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6490 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6491 }
6492
6493 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6494 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6495 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
Jubin John17fb4f22016-02-14 20:21:52 -08006496 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006497 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6498}
6499
6500/*
6501 * Handle a SMA idle message
6502 *
6503 * This is a work-queue function outside of the interrupt.
6504 */
6505void handle_sma_message(struct work_struct *work)
6506{
6507 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6508 sma_message_work);
6509 struct hfi1_devdata *dd = ppd->dd;
6510 u64 msg;
6511 int ret;
6512
Jubin John4d114fd2016-02-14 20:21:43 -08006513 /*
6514 * msg is bytes 1-4 of the 40-bit idle message - the command code
6515 * is stripped off
6516 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006517 ret = read_idle_sma(dd, &msg);
6518 if (ret)
6519 return;
6520 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6521 /*
6522 * React to the SMA message. Byte[1] (0 for us) is the command.
6523 */
6524 switch (msg & 0xff) {
6525 case SMA_IDLE_ARM:
6526 /*
6527 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6528 * State Transitions
6529 *
6530 * Only expected in INIT or ARMED, discard otherwise.
6531 */
6532 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6533 ppd->neighbor_normal = 1;
6534 break;
6535 case SMA_IDLE_ACTIVE:
6536 /*
6537 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6538 * State Transitions
6539 *
6540 * Can activate the node. Discard otherwise.
6541 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006542 if (ppd->host_link_state == HLS_UP_ARMED &&
6543 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006544 ppd->neighbor_normal = 1;
6545 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6546 if (ret)
6547 dd_dev_err(
6548 dd,
6549 "%s: received Active SMA idle message, couldn't set link to Active\n",
6550 __func__);
6551 }
6552 break;
6553 default:
6554 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006555 "%s: received unexpected SMA idle message 0x%llx\n",
6556 __func__, msg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006557 break;
6558 }
6559}
6560
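/*
 * Read-modify-write RCV_CTRL under rcvctrl_lock: set the "add" bits and
 * clear the "clear" bits.
 */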
6561static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6562{
6563 u64 rcvctrl;
6564 unsigned long flags;
6565
6566 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6567 rcvctrl = read_csr(dd, RCV_CTRL);
6568 rcvctrl |= add;
6569 rcvctrl &= ~clear;
6570 write_csr(dd, RCV_CTRL, rcvctrl);
6571 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6572}
6573
6574static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6575{
6576 adjust_rcvctrl(dd, add, 0);
6577}
6578
6579static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6580{
6581 adjust_rcvctrl(dd, 0, clear);
6582}
6583
6584/*
6585 * Called from all interrupt handlers to start handling an SPC freeze.
6586 */
6587void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6588{
6589 struct hfi1_devdata *dd = ppd->dd;
6590 struct send_context *sc;
6591 int i;
6592
6593 if (flags & FREEZE_SELF)
6594 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6595
6596 /* enter frozen mode */
6597 dd->flags |= HFI1_FROZEN;
6598
6599 /* notify all SDMA engines that they are going into a freeze */
6600 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6601
6602 /* do halt pre-handling on all enabled send contexts */
6603 for (i = 0; i < dd->num_send_contexts; i++) {
6604 sc = dd->send_contexts[i].sc;
6605 if (sc && (sc->flags & SCF_ENABLED))
6606 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6607 }
6608
6609 /* Send contexts are frozen. Notify user space */
6610 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6611
6612 if (flags & FREEZE_ABORT) {
6613 dd_dev_err(dd,
6614 "Aborted freeze recovery. Please REBOOT system\n");
6615 return;
6616 }
6617 /* queue non-interrupt handler */
6618 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6619}
6620
6621/*
6622 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6623 * depending on the "freeze" parameter.
6624 *
6625 * No need to return an error if it times out, our only option
6626 * is to proceed anyway.
6627 */
6628static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6629{
6630 unsigned long timeout;
6631 u64 reg;
6632
6633 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6634 while (1) {
6635 reg = read_csr(dd, CCE_STATUS);
6636 if (freeze) {
6637 /* waiting until all indicators are set */
6638 if ((reg & ALL_FROZE) == ALL_FROZE)
6639 return; /* all done */
6640 } else {
6641 /* waiting until all indicators are clear */
6642 if ((reg & ALL_FROZE) == 0)
6643 return; /* all done */
6644 }
6645
6646 if (time_after(jiffies, timeout)) {
6647 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006648 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6649 freeze ? "" : "un", reg & ALL_FROZE,
6650 freeze ? ALL_FROZE : 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006651 return;
6652 }
6653 usleep_range(80, 120);
6654 }
6655}
6656
6657/*
6658 * Do all freeze handling for the RXE block.
6659 */
6660static void rxe_freeze(struct hfi1_devdata *dd)
6661{
6662 int i;
6663
6664 /* disable port */
6665 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6666
6667 /* disable all receive contexts */
6668 for (i = 0; i < dd->num_rcv_contexts; i++)
6669 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6670}
6671
6672/*
6673 * Unfreeze handling for the RXE block - kernel contexts only.
6674 * This will also enable the port. User contexts will do unfreeze
6675 * handling on a per-context basis as they call into the driver.
6676 *
6677 */
6678static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6679{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006680 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006681 int i;
6682
6683 /* enable all kernel contexts */
Mitko Haralanov566c1572016-02-03 14:32:49 -08006684 for (i = 0; i < dd->n_krcv_queues; i++) {
6685 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6686 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6687 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6688 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6689 hfi1_rcvctrl(dd, rcvmask, i);
6690 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006691
6692 /* enable port */
6693 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6694}
6695
6696/*
6697 * Non-interrupt SPC freeze handling.
6698 *
6699 * This is a work-queue function outside of the triggering interrupt.
6700 */
6701void handle_freeze(struct work_struct *work)
6702{
6703 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6704 freeze_work);
6705 struct hfi1_devdata *dd = ppd->dd;
6706
6707 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006708 wait_for_freeze_status(dd, 1);
6709
6710 /* SPC is now frozen */
6711
6712 /* do send PIO freeze steps */
6713 pio_freeze(dd);
6714
6715 /* do send DMA freeze steps */
6716 sdma_freeze(dd);
6717
6718 /* do send egress freeze steps - nothing to do */
6719
6720 /* do receive freeze steps */
6721 rxe_freeze(dd);
6722
6723 /*
6724 * Unfreeze the hardware - clear the freeze, wait for each
6725 * block's frozen bit to clear, then clear the frozen flag.
6726 */
6727 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6728 wait_for_freeze_status(dd, 0);
6729
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006730 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006731 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6732 wait_for_freeze_status(dd, 1);
6733 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6734 wait_for_freeze_status(dd, 0);
6735 }
6736
6737 /* do send PIO unfreeze steps for kernel contexts */
6738 pio_kernel_unfreeze(dd);
6739
6740 /* do send DMA unfreeze steps */
6741 sdma_unfreeze(dd);
6742
6743 /* do send egress unfreeze steps - nothing to do */
6744
6745 /* do receive unfreeze steps for kernel contexts */
6746 rxe_kernel_unfreeze(dd);
6747
6748 /*
6749 * The unfreeze procedure touches global device registers when
6750 * it disables and re-enables RXE. Mark the device unfrozen
6751 * after all that is done so other parts of the driver waiting
6752 * for the device to unfreeze don't do things out of order.
6753 *
6754 * The above implies that the meaning of HFI1_FROZEN flag is
6755 * "Device has gone into freeze mode and freeze mode handling
6756 * is still in progress."
6757 *
6758 * The flag will be removed when freeze mode processing has
6759 * completed.
6760 */
6761 dd->flags &= ~HFI1_FROZEN;
6762 wake_up(&dd->event_queue);
6763
6764 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006765}
6766
6767/*
6768 * Handle a link up interrupt from the 8051.
6769 *
6770 * This is a work-queue function outside of the interrupt.
6771 */
6772void handle_link_up(struct work_struct *work)
6773{
6774 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Jubin John17fb4f22016-02-14 20:21:52 -08006775 link_up_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006776 set_link_state(ppd, HLS_UP_INIT);
6777
6778 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6779 read_ltp_rtt(ppd->dd);
6780 /*
6781 * OPA specifies that certain counters are cleared on a transition
6782 * to link up, so do that.
6783 */
6784 clear_linkup_counters(ppd->dd);
6785 /*
6786 * And (re)set link up default values.
6787 */
6788 set_linkup_defaults(ppd);
6789
6790 /* enforce link speed enabled */
6791 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6792 /* oops - current speed is not enabled, bounce */
6793 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006794 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6795 ppd->link_speed_active, ppd->link_speed_enabled);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006796 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08006797 OPA_LINKDOWN_REASON_SPEED_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006798 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006799 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006800 start_link(ppd);
6801 }
6802}
6803
Jubin John4d114fd2016-02-14 20:21:43 -08006804/*
6805 * Several pieces of LNI information were cached for SMA in ppd.
6806 * Reset these on link down
6807 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006808static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6809{
6810 ppd->neighbor_guid = 0;
6811 ppd->neighbor_port_number = 0;
6812 ppd->neighbor_type = 0;
6813 ppd->neighbor_fm_security = 0;
6814}
6815
Dean Luickfeb831d2016-04-14 08:31:36 -07006816static const char * const link_down_reason_strs[] = {
6817 [OPA_LINKDOWN_REASON_NONE] = "None",
6818 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6819 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6820 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6821 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6822 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6823 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6824 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6825 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6826 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6827 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6828 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6829 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6830 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6831 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6832 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6833 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6834 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6835 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6836 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6837 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6838 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6839 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6840 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6841 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6842 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6843 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6844 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6845 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6846 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6847 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6848 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6849 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6850 "Excessive buffer overrun",
6851 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6852 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6853 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6854 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6855 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6856 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6857 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6858 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6859 "Local media not installed",
6860 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6861 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6862 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6863 "End to end not installed",
6864 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6865 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6866 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6867 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6868 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6869 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6870};
6871
6872/* return the neighbor link down reason string */
6873static const char *link_down_reason_str(u8 reason)
6874{
6875 const char *str = NULL;
6876
6877 if (reason < ARRAY_SIZE(link_down_reason_strs))
6878 str = link_down_reason_strs[reason];
6879 if (!str)
6880 str = "(invalid)";
6881
6882 return str;
6883}
6884
Mike Marciniszyn77241052015-07-30 15:17:43 -04006885/*
6886 * Handle a link down interrupt from the 8051.
6887 *
6888 * This is a work-queue function outside of the interrupt.
6889 */
6890void handle_link_down(struct work_struct *work)
6891{
6892 u8 lcl_reason, neigh_reason = 0;
Dean Luickfeb831d2016-04-14 08:31:36 -07006893 u8 link_down_reason;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006894 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Dean Luickfeb831d2016-04-14 08:31:36 -07006895 link_down_work);
6896 int was_up;
6897 static const char ldr_str[] = "Link down reason: ";
Mike Marciniszyn77241052015-07-30 15:17:43 -04006898
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006899 if ((ppd->host_link_state &
6900 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6901 ppd->port_type == PORT_TYPE_FIXED)
6902 ppd->offline_disabled_reason =
6903 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6904
6905 /* Go offline first, then deal with reading/writing through 8051 */
Dean Luickfeb831d2016-04-14 08:31:36 -07006906 was_up = !!(ppd->host_link_state & HLS_UP);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006907 set_link_state(ppd, HLS_DN_OFFLINE);
6908
Dean Luickfeb831d2016-04-14 08:31:36 -07006909 if (was_up) {
6910 lcl_reason = 0;
6911 /* link down reason is only valid if the link was up */
6912 read_link_down_reason(ppd->dd, &link_down_reason);
6913 switch (link_down_reason) {
6914 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6915 /* the link went down, no idle message reason */
6916 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6917 ldr_str);
6918 break;
6919 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6920 /*
6921 * The neighbor reason is only valid if an idle message
6922 * was received for it.
6923 */
6924 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6925 dd_dev_info(ppd->dd,
6926 "%sNeighbor link down message %d, %s\n",
6927 ldr_str, neigh_reason,
6928 link_down_reason_str(neigh_reason));
6929 break;
6930 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6931 dd_dev_info(ppd->dd,
6932 "%sHost requested link to go offline\n",
6933 ldr_str);
6934 break;
6935 default:
6936 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6937 ldr_str, link_down_reason);
6938 break;
6939 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006940
Dean Luickfeb831d2016-04-14 08:31:36 -07006941 /*
6942 * If no reason, assume peer-initiated but missed
6943 * LinkGoingDown idle flits.
6944 */
6945 if (neigh_reason == 0)
6946 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6947 } else {
6948 /* went down while polling or going up */
6949 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6950 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006951
6952 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6953
Dean Luick015e91f2016-04-14 08:31:42 -07006954 /* inform the SMA when the link transitions from up to down */
6955 if (was_up && ppd->local_link_down_reason.sma == 0 &&
6956 ppd->neigh_link_down_reason.sma == 0) {
6957 ppd->local_link_down_reason.sma =
6958 ppd->local_link_down_reason.latest;
6959 ppd->neigh_link_down_reason.sma =
6960 ppd->neigh_link_down_reason.latest;
6961 }
6962
Mike Marciniszyn77241052015-07-30 15:17:43 -04006963 reset_neighbor_info(ppd);
6964
6965 /* disable the port */
6966 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6967
Jubin John4d114fd2016-02-14 20:21:43 -08006968 /*
6969 * If there is no cable attached, turn the DC off. Otherwise,
6970 * start the link bring up.
6971 */
Easwar Hariharan623bba22016-04-12 11:25:57 -07006972 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006973 dc_shutdown(ppd->dd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006974 } else {
6975 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006976 start_link(ppd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006977 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006978}
6979
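/*
 * Bounce the link: if it is currently up, take it offline, retune the
 * SerDes, and restart the link bring-up.
 *
 * This is a work-queue function outside of the interrupt.
 */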
6980void handle_link_bounce(struct work_struct *work)
6981{
6982 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6983 link_bounce_work);
6984
6985 /*
6986 * Only do something if the link is currently up.
6987 */
6988 if (ppd->host_link_state & HLS_UP) {
6989 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006990 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006991 start_link(ppd);
6992 } else {
6993 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006994 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006995 }
6996}
6997
6998/*
6999 * Mask conversion: Capability exchange to Port LTP. The capability
7000 * exchange has an implicit 16b CRC that is mandatory.
7001 */
7002static int cap_to_port_ltp(int cap)
7003{
7004 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7005
7006 if (cap & CAP_CRC_14B)
7007 port_ltp |= PORT_LTP_CRC_MODE_14;
7008 if (cap & CAP_CRC_48B)
7009 port_ltp |= PORT_LTP_CRC_MODE_48;
7010 if (cap & CAP_CRC_12B_16B_PER_LANE)
7011 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7012
7013 return port_ltp;
7014}
7015
7016/*
7017 * Convert an OPA Port LTP mask to capability mask
7018 */
7019int port_ltp_to_cap(int port_ltp)
7020{
7021 int cap_mask = 0;
7022
7023 if (port_ltp & PORT_LTP_CRC_MODE_14)
7024 cap_mask |= CAP_CRC_14B;
7025 if (port_ltp & PORT_LTP_CRC_MODE_48)
7026 cap_mask |= CAP_CRC_48B;
7027 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7028 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7029
7030 return cap_mask;
7031}
7032
7033/*
7034 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7035 */
7036static int lcb_to_port_ltp(int lcb_crc)
7037{
7038 int port_ltp = 0;
7039
7040 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7041 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7042 else if (lcb_crc == LCB_CRC_48B)
7043 port_ltp = PORT_LTP_CRC_MODE_48;
7044 else if (lcb_crc == LCB_CRC_14B)
7045 port_ltp = PORT_LTP_CRC_MODE_14;
7046 else
7047 port_ltp = PORT_LTP_CRC_MODE_16;
7048
7049 return port_ltp;
7050}
7051
7052/*
7053 * Our neighbor has indicated that we are allowed to act as a fabric
7054 * manager, so place the full management partition key in the second
7055 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7056 * that we should already have the limited management partition key in
7057 * array element 1, and also that the port is not yet up when
7058 * add_full_mgmt_pkey() is invoked.
7059 */
7060static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7061{
7062 struct hfi1_devdata *dd = ppd->dd;
7063
Dean Luick87645222015-12-01 15:38:21 -05007064 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7065 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7066 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7067 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007068 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7069 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7070}
7071
7072/*
7073 * Convert the given link width to the OPA link width bitmask.
7074 */
7075static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7076{
7077 switch (width) {
7078 case 0:
7079 /*
7080 * Simulator and quick linkup do not set the width.
7081 * Just set it to 4x without complaint.
7082 */
7083 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7084 return OPA_LINK_WIDTH_4X;
7085 return 0; /* no lanes up */
7086 case 1: return OPA_LINK_WIDTH_1X;
7087 case 2: return OPA_LINK_WIDTH_2X;
7088 case 3: return OPA_LINK_WIDTH_3X;
7089 default:
7090 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007091 __func__, width);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007092 /* fall through */
7093 case 4: return OPA_LINK_WIDTH_4X;
7094 }
7095}
7096
7097/*
7098 * Do a population count on the bottom nibble.
7099 */
7100static const u8 bit_counts[16] = {
7101 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7102};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007103
Mike Marciniszyn77241052015-07-30 15:17:43 -04007104static inline u8 nibble_to_count(u8 nibble)
7105{
7106 return bit_counts[nibble & 0xf];
7107}
7108
7109/*
7110 * Read the active lane information from the 8051 registers and return
7111 * their widths.
7112 *
7113 * Active lane information is found in these 8051 registers:
7114 * enable_lane_tx
7115 * enable_lane_rx
7116 */
7117static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7118 u16 *rx_width)
7119{
7120 u16 tx, rx;
7121 u8 enable_lane_rx;
7122 u8 enable_lane_tx;
7123 u8 tx_polarity_inversion;
7124 u8 rx_polarity_inversion;
7125 u8 max_rate;
7126
7127 /* read the active lanes */
7128 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08007129 &rx_polarity_inversion, &max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007130 read_local_lni(dd, &enable_lane_rx);
7131
7132 /* convert to counts */
7133 tx = nibble_to_count(enable_lane_tx);
7134 rx = nibble_to_count(enable_lane_rx);
7135
7136 /*
7137 * Set link_speed_active here, overriding what was set in
7138 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7139 * set the max_rate field in handle_verify_cap until v0.19.
7140 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007141 if ((dd->icode == ICODE_RTL_SILICON) &&
7142 (dd->dc8051_ver < dc8051_ver(0, 19))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007143 /* max_rate: 0 = 12.5G, 1 = 25G */
7144 switch (max_rate) {
7145 case 0:
7146 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7147 break;
7148 default:
7149 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007150 "%s: unexpected max rate %d, using 25Gb\n",
7151 __func__, (int)max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007152 /* fall through */
7153 case 1:
7154 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7155 break;
7156 }
7157 }
7158
7159 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007160 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7161 enable_lane_tx, tx, enable_lane_rx, rx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007162 *tx_width = link_width_to_bits(dd, tx);
7163 *rx_width = link_width_to_bits(dd, rx);
7164}
7165
7166/*
7167 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7168 * Valid after the end of VerifyCap and during LinkUp. Does not change
7169 * after link up. I.e. look elsewhere for downgrade information.
7170 *
7171 * Bits are:
7172 * + bits [7:4] contain the number of active transmitters
7173 * + bits [3:0] contain the number of active receivers
7174 * These are numbers 1 through 4 and can be different values if the
7175 * link is asymmetric.
7176 *
7177 * verify_cap_local_fm_link_width[0] retains its original value.
7178 */
7179static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7180 u16 *rx_width)
7181{
7182 u16 widths, tx, rx;
7183 u8 misc_bits, local_flags;
7184 u16 active_tx, active_rx;
7185
7186 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7187 tx = widths >> 12;
7188 rx = (widths >> 8) & 0xf;
7189
7190 *tx_width = link_width_to_bits(dd, tx);
7191 *rx_width = link_width_to_bits(dd, rx);
7192
7193 /* print the active widths */
7194 get_link_widths(dd, &active_tx, &active_rx);
7195}
7196
7197/*
7198 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7199 * hardware information when the link first comes up.
7200 *
7201 * The link width is not available until after VerifyCap.AllFramesReceived
7202 * (the trigger for handle_verify_cap), so this is outside that routine
7203 * and should be called when the 8051 signals linkup.
7204 */
7205void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7206{
7207 u16 tx_width, rx_width;
7208
7209 /* get end-of-LNI link widths */
7210 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7211
7212 /* use tx_width as the link is supposed to be symmetric on link up */
7213 ppd->link_width_active = tx_width;
7214 /* link width downgrade active (LWD.A) starts out matching LW.A */
7215 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7216 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7217 /* per OPA spec, on link up LWD.E resets to LWD.S */
7218 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7219 /* cache the active egress rate (units {10^6 bits/sec]) */
7220 ppd->current_egress_rate = active_egress_rate(ppd);
7221}
7222
7223/*
7224 * Handle a verify capabilities interrupt from the 8051.
7225 *
7226 * This is a work-queue function outside of the interrupt.
7227 */
7228void handle_verify_cap(struct work_struct *work)
7229{
7230 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7231 link_vc_work);
7232 struct hfi1_devdata *dd = ppd->dd;
7233 u64 reg;
7234 u8 power_management;
7235 u8 continuous;
7236 u8 vcu;
7237 u8 vau;
7238 u8 z;
7239 u16 vl15buf;
7240 u16 link_widths;
7241 u16 crc_mask;
7242 u16 crc_val;
7243 u16 device_id;
7244 u16 active_tx, active_rx;
7245 u8 partner_supported_crc;
7246 u8 remote_tx_rate;
7247 u8 device_rev;
7248
7249 set_link_state(ppd, HLS_VERIFY_CAP);
7250
7251 lcb_shutdown(dd, 0);
7252 adjust_lcb_for_fpga_serdes(dd);
7253
7254 /*
7255 * These are now valid:
7256 * remote VerifyCap fields in the general LNI config
7257 * CSR DC8051_STS_REMOTE_GUID
7258 * CSR DC8051_STS_REMOTE_NODE_TYPE
7259 * CSR DC8051_STS_REMOTE_FM_SECURITY
7260 * CSR DC8051_STS_REMOTE_PORT_NO
7261 */
7262
7263 read_vc_remote_phy(dd, &power_management, &continuous);
Jubin John17fb4f22016-02-14 20:21:52 -08007264 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7265 &partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007266 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7267 read_remote_device_id(dd, &device_id, &device_rev);
7268 /*
7269 * And the 'MgmtAllowed' information, which is exchanged during
7270 * LNI, is also be available at this point.
7271 */
7272 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7273 /* print the active widths */
7274 get_link_widths(dd, &active_tx, &active_rx);
7275 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007276 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7277 (int)power_management, (int)continious);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007278 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007279 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7280 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7281 (int)partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007282 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007283 (u32)remote_tx_rate, (u32)link_widths);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007284 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007285 (u32)device_id, (u32)device_rev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007286 /*
7287 * The peer vAU value just read is the peer receiver value. HFI does
7288 * not support a transmit vAU of 0 (AU == 8). We advertised that
7289 * with Z=1 in the fabric capabilities sent to the peer. The peer
7290 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7291 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7292 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7293 * subject to the Z value exception.
7294 */
7295 if (vau == 0)
7296 vau = 1;
7297 set_up_vl15(dd, vau, vl15buf);
7298
7299 /* set up the LCB CRC mode */
7300 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7301
7302 /* order is important: use the lowest bit in common */
7303 if (crc_mask & CAP_CRC_14B)
7304 crc_val = LCB_CRC_14B;
7305 else if (crc_mask & CAP_CRC_48B)
7306 crc_val = LCB_CRC_48B;
7307 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7308 crc_val = LCB_CRC_12B_16B_PER_LANE;
7309 else
7310 crc_val = LCB_CRC_16B;
7311
7312 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7313 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7314 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7315
7316 /* set (14b only) or clear sideband credit */
7317 reg = read_csr(dd, SEND_CM_CTRL);
7318 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7319 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007320 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007321 } else {
7322 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007323 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007324 }
7325
7326 ppd->link_speed_active = 0; /* invalid value */
7327 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7328 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7329 switch (remote_tx_rate) {
7330 case 0:
7331 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7332 break;
7333 case 1:
7334 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7335 break;
7336 }
7337 } else {
7338 /* actual rate is highest bit of the ANDed rates */
7339 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7340
7341 if (rate & 2)
7342 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7343 else if (rate & 1)
7344 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7345 }
7346 if (ppd->link_speed_active == 0) {
7347 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007348 __func__, (int)remote_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007349 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7350 }
7351
7352 /*
7353 * Cache the values of the supported, enabled, and active
7354 * LTP CRC modes to return in 'portinfo' queries. But the bit
7355 * flags that are returned in the portinfo query differ from
7356 * what's in the link_crc_mask, crc_sizes, and crc_val
7357 * variables. Convert these here.
7358 */
7359 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7360 /* supported crc modes */
7361 ppd->port_ltp_crc_mode |=
7362 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7363 /* enabled crc modes */
7364 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7365 /* active crc mode */
7366
7367 /* set up the remote credit return table */
7368 assign_remote_cm_au_table(dd, vcu);
7369
7370 /*
7371 * The LCB is reset on entry to handle_verify_cap(), so this must
7372 * be applied on every link up.
7373 *
7374 * Adjust LCB error kill enable to kill the link if
7375 * these RBUF errors are seen:
7376 * REPLAY_BUF_MBE_SMASK
7377 * FLIT_INPUT_BUF_MBE_SMASK
7378 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007379 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007380 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7381 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7382 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7383 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7384 }
7385
7386 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7387 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7388
7389 /* give 8051 access to the LCB CSRs */
7390 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7391 set_8051_lcb_access(dd);
7392
7393 ppd->neighbor_guid =
7394 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7395 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7396 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7397 ppd->neighbor_type =
7398 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7399 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7400 ppd->neighbor_fm_security =
7401 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7402 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7403 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007404 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7405 ppd->neighbor_guid, ppd->neighbor_type,
7406 ppd->mgmt_allowed, ppd->neighbor_fm_security);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007407 if (ppd->mgmt_allowed)
7408 add_full_mgmt_pkey(ppd);
7409
7410 /* tell the 8051 to go to LinkUp */
7411 set_link_state(ppd, HLS_GOING_UP);
7412}
7413
7414/*
7415 * Apply the link width downgrade enabled policy against the current active
7416 * link widths.
7417 *
7418 * Called when the enabled policy changes or the active link widths change.
7419 */
7420void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7421{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007422 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007423 int tries;
7424 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007425 u16 tx, rx;
7426
Dean Luick323fd782015-11-16 21:59:24 -05007427 /* use the hls lock to avoid a race with actual link up */
7428 tries = 0;
7429retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007430 mutex_lock(&ppd->hls_lock);
7431 /* only apply if the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07007432 if (ppd->host_link_state & HLS_DOWN) {
Dean Luick323fd782015-11-16 21:59:24 -05007433 /* still going up..wait and retry */
7434 if (ppd->host_link_state & HLS_GOING_UP) {
7435 if (++tries < 1000) {
7436 mutex_unlock(&ppd->hls_lock);
7437 usleep_range(100, 120); /* arbitrary */
7438 goto retry;
7439 }
7440 dd_dev_err(ppd->dd,
7441 "%s: giving up waiting for link state change\n",
7442 __func__);
7443 }
7444 goto done;
7445 }
7446
7447 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007448
7449 if (refresh_widths) {
7450 get_link_widths(ppd->dd, &tx, &rx);
7451 ppd->link_width_downgrade_tx_active = tx;
7452 ppd->link_width_downgrade_rx_active = rx;
7453 }
7454
Dean Luickf9b56352016-04-14 08:31:30 -07007455 if (ppd->link_width_downgrade_tx_active == 0 ||
7456 ppd->link_width_downgrade_rx_active == 0) {
7457 /* the 8051 reported a dead link as a downgrade */
7458 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7459 } else if (lwde == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007460 /* downgrade is disabled */
7461
7462 /* bounce if not at starting active width */
7463 if ((ppd->link_width_active !=
Jubin John17fb4f22016-02-14 20:21:52 -08007464 ppd->link_width_downgrade_tx_active) ||
7465 (ppd->link_width_active !=
7466 ppd->link_width_downgrade_rx_active)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007467 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007468 "Link downgrade is disabled and link has downgraded, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007469 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007470 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7471 ppd->link_width_active,
7472 ppd->link_width_downgrade_tx_active,
7473 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007474 do_bounce = 1;
7475 }
Jubin Johnd0d236e2016-02-14 20:20:15 -08007476 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7477 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007478 /* Tx or Rx is outside the enabled policy */
7479 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007480 "Link is outside of downgrade allowed, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007481 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007482 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7483 lwde, ppd->link_width_downgrade_tx_active,
7484 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007485 do_bounce = 1;
7486 }
7487
Dean Luick323fd782015-11-16 21:59:24 -05007488done:
7489 mutex_unlock(&ppd->hls_lock);
7490
Mike Marciniszyn77241052015-07-30 15:17:43 -04007491 if (do_bounce) {
7492 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08007493 OPA_LINKDOWN_REASON_WIDTH_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007494 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007495 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007496 start_link(ppd);
7497 }
7498}
7499
7500/*
7501 * Handle a link downgrade interrupt from the 8051.
7502 *
7503 * This is a work-queue function outside of the interrupt.
7504 */
7505void handle_link_downgrade(struct work_struct *work)
7506{
7507 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7508 link_downgrade_work);
7509
7510 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7511 apply_link_downgrade_policy(ppd, 1);
7512}
7513
7514static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7515{
7516 return flag_string(buf, buf_len, flags, dcc_err_flags,
7517 ARRAY_SIZE(dcc_err_flags));
7518}
7519
7520static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7521{
7522 return flag_string(buf, buf_len, flags, lcb_err_flags,
7523 ARRAY_SIZE(lcb_err_flags));
7524}
7525
7526static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7527{
7528 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7529 ARRAY_SIZE(dc8051_err_flags));
7530}
7531
7532static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7533{
7534 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7535 ARRAY_SIZE(dc8051_info_err_flags));
7536}
7537
7538static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7539{
7540 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7541 ARRAY_SIZE(dc8051_info_host_msg_flags));
7542}
7543
7544static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7545{
7546 struct hfi1_pportdata *ppd = dd->pport;
7547 u64 info, err, host_msg;
7548 int queue_link_down = 0;
7549 char buf[96];
7550
7551 /* look at the flags */
7552 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7553 /* 8051 information set by firmware */
7554 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7555 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7556 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7557 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7558 host_msg = (info >>
7559 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7560 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7561
7562 /*
7563 * Handle error flags.
7564 */
7565 if (err & FAILED_LNI) {
7566 /*
7567 * LNI error indications are cleared by the 8051
7568 * only when starting polling. Only pay attention
7569 * to them when in the states that occur during
7570 * LNI.
7571 */
7572 if (ppd->host_link_state
7573 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7574 queue_link_down = 1;
7575 dd_dev_info(dd, "Link error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007576 dc8051_info_err_string(buf,
7577 sizeof(buf),
7578 err &
7579 FAILED_LNI));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007580 }
7581 err &= ~(u64)FAILED_LNI;
7582 }
Dean Luick6d014532015-12-01 15:38:23 -05007583 /* unknown frames can happen during LNI, just count */
7584 if (err & UNKNOWN_FRAME) {
7585 ppd->unknown_frame_count++;
7586 err &= ~(u64)UNKNOWN_FRAME;
7587 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007588 if (err) {
7589 /* report remaining errors, but do not do anything */
7590 dd_dev_err(dd, "8051 info error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007591 dc8051_info_err_string(buf, sizeof(buf),
7592 err));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007593 }
7594
7595 /*
7596 * Handle host message flags.
7597 */
7598 if (host_msg & HOST_REQ_DONE) {
7599 /*
7600 * Presently, the driver does a busy wait for
7601 * host requests to complete. This is only an
7602 * informational message.
7603 * NOTE: The 8051 clears the host message
7604 * information *on the next 8051 command*.
7605 * Therefore, when linkup is achieved,
7606 * this flag will still be set.
7607 */
7608 host_msg &= ~(u64)HOST_REQ_DONE;
7609 }
7610 if (host_msg & BC_SMA_MSG) {
7611 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7612 host_msg &= ~(u64)BC_SMA_MSG;
7613 }
7614 if (host_msg & LINKUP_ACHIEVED) {
7615 dd_dev_info(dd, "8051: Link up\n");
7616 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7617 host_msg &= ~(u64)LINKUP_ACHIEVED;
7618 }
7619 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07007620 handle_8051_request(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007621 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7622 }
7623 if (host_msg & VERIFY_CAP_FRAME) {
7624 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7625 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7626 }
7627 if (host_msg & LINK_GOING_DOWN) {
7628 const char *extra = "";
7629 /* no downgrade action needed if going down */
7630 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7631 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7632 extra = " (ignoring downgrade)";
7633 }
7634 dd_dev_info(dd, "8051: Link down%s\n", extra);
7635 queue_link_down = 1;
7636 host_msg &= ~(u64)LINK_GOING_DOWN;
7637 }
7638 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7639 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7640 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7641 }
7642 if (host_msg) {
7643 /* report remaining messages, but do not do anything */
7644 dd_dev_info(dd, "8051 info host message: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007645 dc8051_info_host_msg_string(buf,
7646 sizeof(buf),
7647 host_msg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007648 }
7649
7650 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7651 }
7652 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7653 /*
7654 * Lost the 8051 heartbeat. If this happens, we
7655 * receive constant interrupts about it. Disable
7656 * the interrupt after the first.
7657 */
7658 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7659 write_csr(dd, DC_DC8051_ERR_EN,
Jubin John17fb4f22016-02-14 20:21:52 -08007660 read_csr(dd, DC_DC8051_ERR_EN) &
7661 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007662
7663 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7664 }
7665 if (reg) {
7666 /* report the error, but do not do anything */
7667 dd_dev_err(dd, "8051 error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007668 dc8051_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007669 }
7670
7671 if (queue_link_down) {
Jubin John4d114fd2016-02-14 20:21:43 -08007672 /*
7673 * if the link is already going down or disabled, do not
7674 * queue another
7675 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007676 if ((ppd->host_link_state &
7677 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7678 ppd->link_enabled == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007679 dd_dev_info(dd, "%s: not queuing link down\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007680 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007681 } else {
7682 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7683 }
7684 }
7685}
7686
7687static const char * const fm_config_txt[] = {
7688[0] =
7689 "BadHeadDist: Distance violation between two head flits",
7690[1] =
7691 "BadTailDist: Distance violation between two tail flits",
7692[2] =
7693 "BadCtrlDist: Distance violation between two credit control flits",
7694[3] =
7695 "BadCrdAck: Credits return for unsupported VL",
7696[4] =
7697 "UnsupportedVLMarker: Received VL Marker",
7698[5] =
7699 "BadPreempt: Exceeded the preemption nesting level",
7700[6] =
7701 "BadControlFlit: Received unsupported control flit",
7702/* no 7 */
7703[8] =
7704 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7705};
7706
7707static const char * const port_rcv_txt[] = {
7708[1] =
7709 "BadPktLen: Illegal PktLen",
7710[2] =
7711 "PktLenTooLong: Packet longer than PktLen",
7712[3] =
7713 "PktLenTooShort: Packet shorter than PktLen",
7714[4] =
7715 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7716[5] =
7717 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7718[6] =
7719 "BadL2: Illegal L2 opcode",
7720[7] =
7721 "BadSC: Unsupported SC",
7722[9] =
7723 "BadRC: Illegal RC",
7724[11] =
7725 "PreemptError: Preempting with same VL",
7726[12] =
7727 "PreemptVL15: Preempting a VL15 packet",
7728};
7729
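/*
 * Bit offsets of the FMConfig and PortRcv error-code groups within the
 * OPA PortErrorAction mask checked below; a set bit for an error code
 * requests a link bounce when that error is reported.
 */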
7730#define OPA_LDR_FMCONFIG_OFFSET 16
7731#define OPA_LDR_PORTRCV_OFFSET 0
7732static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7733{
7734 u64 info, hdr0, hdr1;
7735 const char *extra;
7736 char buf[96];
7737 struct hfi1_pportdata *ppd = dd->pport;
7738 u8 lcl_reason = 0;
7739 int do_bounce = 0;
7740
7741 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7742 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7743 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7744 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7745 /* set status bit */
7746 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7747 }
7748 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7749 }
7750
7751 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7752 struct hfi1_pportdata *ppd = dd->pport;
7753 /* this counter saturates at (2^32) - 1 */
7754 if (ppd->link_downed < (u32)UINT_MAX)
7755 ppd->link_downed++;
7756 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7757 }
7758
7759 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7760 u8 reason_valid = 1;
7761
7762 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7763 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7764 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7765 /* set status bit */
7766 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7767 }
7768 switch (info) {
7769 case 0:
7770 case 1:
7771 case 2:
7772 case 3:
7773 case 4:
7774 case 5:
7775 case 6:
7776 extra = fm_config_txt[info];
7777 break;
7778 case 8:
7779 extra = fm_config_txt[info];
7780 if (ppd->port_error_action &
7781 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7782 do_bounce = 1;
7783 /*
7784 * lcl_reason cannot be derived from info
7785 * for this error
7786 */
7787 lcl_reason =
7788 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7789 }
7790 break;
7791 default:
7792 reason_valid = 0;
7793 snprintf(buf, sizeof(buf), "reserved%lld", info);
7794 extra = buf;
7795 break;
7796 }
7797
7798 if (reason_valid && !do_bounce) {
7799 do_bounce = ppd->port_error_action &
7800 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7801 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7802 }
7803
7804 /* just report this */
7805 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7806 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7807 }
7808
7809 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7810 u8 reason_valid = 1;
7811
7812 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7813 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7814 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7815 if (!(dd->err_info_rcvport.status_and_code &
7816 OPA_EI_STATUS_SMASK)) {
7817 dd->err_info_rcvport.status_and_code =
7818 info & OPA_EI_CODE_SMASK;
7819 /* set status bit */
7820 dd->err_info_rcvport.status_and_code |=
7821 OPA_EI_STATUS_SMASK;
Jubin John4d114fd2016-02-14 20:21:43 -08007822 /*
7823 * save first 2 flits in the packet that caused
7824 * the error
7825 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007826 dd->err_info_rcvport.packet_flit1 = hdr0;
7827 dd->err_info_rcvport.packet_flit2 = hdr1;
7828 }
7829 switch (info) {
7830 case 1:
7831 case 2:
7832 case 3:
7833 case 4:
7834 case 5:
7835 case 6:
7836 case 7:
7837 case 9:
7838 case 11:
7839 case 12:
7840 extra = port_rcv_txt[info];
7841 break;
7842 default:
7843 reason_valid = 0;
7844 snprintf(buf, sizeof(buf), "reserved%lld", info);
7845 extra = buf;
7846 break;
7847 }
7848
7849 if (reason_valid && !do_bounce) {
7850 do_bounce = ppd->port_error_action &
7851 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7852 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7853 }
7854
7855 /* just report this */
7856 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7857 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007858 hdr0, hdr1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007859
7860 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7861 }
7862
7863 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7864 /* informative only */
7865 dd_dev_info(dd, "8051 access to LCB blocked\n");
7866 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7867 }
7868 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7869 /* informative only */
7870 dd_dev_info(dd, "host access to LCB blocked\n");
7871 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7872 }
7873
7874 /* report any remaining errors */
7875 if (reg)
7876 dd_dev_info(dd, "DCC Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007877 dcc_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007878
7879 if (lcl_reason == 0)
7880 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7881
7882 if (do_bounce) {
7883 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7884 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7885 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7886 }
7887}
7888
7889static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7890{
7891 char buf[96];
7892
7893 dd_dev_info(dd, "LCB Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007894 lcb_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007895}
7896
7897/*
7898 * CCE block DC interrupt. Source is < 8.
7899 */
7900static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7901{
7902 const struct err_reg_info *eri = &dc_errs[source];
7903
7904 if (eri->handler) {
7905 interrupt_clear_down(dd, 0, eri);
7906 } else if (source == 3 /* dc_lbm_int */) {
7907 /*
7908 * This indicates that a parity error has occurred on the
7909 * address/control lines presented to the LBM. The error
7910 * is a single pulse, there is no associated error flag,
7911 * and it is non-maskable. This is because if a parity
7912 * error occurs on the request the request is dropped.
7913 * This should never occur, but it is nice to know if it
7914 * ever does.
7915 */
7916 dd_dev_err(dd, "Parity error in DC LBM block\n");
7917 } else {
7918 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7919 }
7920}
7921
7922/*
7923 * TX block send credit interrupt. Source is < 160.
7924 */
7925static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7926{
7927 sc_group_release_update(dd, source);
7928}
7929
7930/*
7931 * TX block SDMA interrupt. Source is < 48.
7932 *
7933 * SDMA interrupts are grouped by type:
7934 *
7935 * 0 - N-1 = SDma
7936 * N - 2N-1 = SDmaProgress
7937 * 2N - 3N-1 = SDmaIdle
7938 */
7939static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7940{
7941 /* what interrupt */
7942 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7943 /* which engine */
7944 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
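 /*
  * Example (assuming 16 engines, i.e. 48 sources in 3 groups of N=16):
  * source 20 decodes to what = 1 (SDmaProgress) on engine which = 4.
  */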
7945
7946#ifdef CONFIG_SDMA_VERBOSITY
7947 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7948 slashstrip(__FILE__), __LINE__, __func__);
7949 sdma_dumpstate(&dd->per_sdma[which]);
7950#endif
7951
7952 if (likely(what < 3 && which < dd->num_sdma)) {
7953 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7954 } else {
7955 /* should not happen */
7956 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7957 }
7958}
7959
7960/*
7961 * RX block receive available interrupt. Source is < 160.
7962 */
7963static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7964{
7965 struct hfi1_ctxtdata *rcd;
7966 char *err_detail;
7967
7968 if (likely(source < dd->num_rcv_contexts)) {
7969 rcd = dd->rcd[source];
7970 if (rcd) {
7971 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007972 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007973 else
7974 handle_user_interrupt(rcd);
7975 return; /* OK */
7976 }
7977 /* received an interrupt, but no rcd */
7978 err_detail = "dataless";
7979 } else {
7980 /* received an interrupt, but are not using that context */
7981 err_detail = "out of range";
7982 }
7983 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007984 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007985}
7986
7987/*
7988 * RX block receive urgent interrupt. Source is < 160.
7989 */
7990static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7991{
7992 struct hfi1_ctxtdata *rcd;
7993 char *err_detail;
7994
7995 if (likely(source < dd->num_rcv_contexts)) {
7996 rcd = dd->rcd[source];
7997 if (rcd) {
7998 /* only pay attention to user urgent interrupts */
7999 if (source >= dd->first_user_ctxt)
8000 handle_user_interrupt(rcd);
8001 return; /* OK */
8002 }
8003 /* received an interrupt, but no rcd */
8004 err_detail = "dataless";
8005 } else {
8006 /* received an interrupt, but are not using that context */
8007 err_detail = "out of range";
8008 }
8009 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008010 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008011}
8012
8013/*
8014 * Reserved range interrupt. Should not be called in normal operation.
8015 */
8016static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8017{
8018 char name[64];
8019
8020 dd_dev_err(dd, "unexpected %s interrupt\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008021 is_reserved_name(name, sizeof(name), source));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008022}
8023
8024static const struct is_table is_table[] = {
Jubin John4d114fd2016-02-14 20:21:43 -08008025/*
8026 * start end
8027 * name func interrupt func
8028 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008029{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8030 is_misc_err_name, is_misc_err_int },
8031{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8032 is_sdma_eng_err_name, is_sdma_eng_err_int },
8033{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8034 is_sendctxt_err_name, is_sendctxt_err_int },
8035{ IS_SDMA_START, IS_SDMA_END,
8036 is_sdma_eng_name, is_sdma_eng_int },
8037{ IS_VARIOUS_START, IS_VARIOUS_END,
8038 is_various_name, is_various_int },
8039{ IS_DC_START, IS_DC_END,
8040 is_dc_name, is_dc_int },
8041{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8042 is_rcv_avail_name, is_rcv_avail_int },
8043{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8044 is_rcv_urgent_name, is_rcv_urgent_int },
8045{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8046 is_send_credit_name, is_send_credit_int},
8047{ IS_RESERVED_START, IS_RESERVED_END,
8048 is_reserved_name, is_reserved_int},
8049};
8050
8051/*
8052 * Interrupt source interrupt - called when the given source has an interrupt.
8053 * Source is a bit index into an array of 64-bit integers.
8054 */
8055static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8056{
8057 const struct is_table *entry;
8058
8059 /* avoids a double compare by walking the table in-order */
8060 for (entry = &is_table[0]; entry->is_name; entry++) {
8061 if (source < entry->end) {
8062 trace_hfi1_interrupt(dd, entry, source);
8063 entry->is_int(dd, source - entry->start);
8064 return;
8065 }
8066 }
8067 /* fell off the end */
8068 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8069}
8070
8071/*
8072 * General interrupt handler. This is able to correctly handle
8073 * all interrupts in case INTx is used.
8074 */
8075static irqreturn_t general_interrupt(int irq, void *data)
8076{
8077 struct hfi1_devdata *dd = data;
8078 u64 regs[CCE_NUM_INT_CSRS];
8079 u32 bit;
8080 int i;
8081
8082 this_cpu_inc(*dd->int_counter);
8083
8084 /* phase 1: scan and clear all handled interrupts */
8085 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8086 if (dd->gi_mask[i] == 0) {
8087 regs[i] = 0; /* used later */
8088 continue;
8089 }
8090 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8091 dd->gi_mask[i];
8092 /* only clear if anything is set */
8093 if (regs[i])
8094 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8095 }
8096
8097 /* phase 2: call the appropriate handler */
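 /*
  * regs[] is scanned as a single bitmap of CCE_NUM_INT_CSRS * 64 bits,
  * so the bit index is the interrupt source number handed to
  * is_interrupt().
  */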
8098 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John17fb4f22016-02-14 20:21:52 -08008099 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008100 is_interrupt(dd, bit);
8101 }
8102
8103 return IRQ_HANDLED;
8104}
8105
8106static irqreturn_t sdma_interrupt(int irq, void *data)
8107{
8108 struct sdma_engine *sde = data;
8109 struct hfi1_devdata *dd = sde->dd;
8110 u64 status;
8111
8112#ifdef CONFIG_SDMA_VERBOSITY
8113 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8114 slashstrip(__FILE__), __LINE__, __func__);
8115 sdma_dumpstate(sde);
8116#endif
8117
8118 this_cpu_inc(*dd->int_counter);
8119
8120 /* This read_csr is really bad in the hot path */
8121 status = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008122 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8123 & sde->imask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008124 if (likely(status)) {
8125 /* clear the interrupt(s) */
8126 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008127 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8128 status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008129
8130 /* handle the interrupt(s) */
8131 sdma_engine_interrupt(sde, status);
8132 } else
8133 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008134 sde->this_idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008135
8136 return IRQ_HANDLED;
8137}
8138
8139/*
Dean Luickecd42f82016-02-03 14:35:14 -08008140 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8141 * to ensure that the write completed. This does NOT guarantee that
8142 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008143 */
8144static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8145{
8146 struct hfi1_devdata *dd = rcd->dd;
8147 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8148
8149 mmiowb(); /* make sure everything before is written */
8150 write_csr(dd, addr, rcd->imask);
8151 /* force the above write on the chip and get a value back */
8152 (void)read_csr(dd, addr);
8153}
8154
8155/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008156void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008157{
8158 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8159}
8160
Dean Luickecd42f82016-02-03 14:35:14 -08008161/*
8162 * Return non-zero if a packet is present.
8163 *
8164 * This routine is called when rechecking for packets after the RcvAvail
8165 * interrupt has been cleared down. First, do a quick check of memory for
8166 * a packet present. If not found, use an expensive CSR read of the context
8167 * tail to determine the actual tail. The CSR read is necessary because there
8168 * is no method to push pending DMAs to memory other than an interrupt and we
8169 * are trying to determine if we need to force an interrupt.
8170 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008171static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8172{
Dean Luickecd42f82016-02-03 14:35:14 -08008173 u32 tail;
8174 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008175
Dean Luickecd42f82016-02-03 14:35:14 -08008176 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8177 present = (rcd->seq_cnt ==
8178 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8179 else /* is RDMA rtail */
8180 present = (rcd->head != get_rcvhdrtail(rcd));
8181
8182 if (present)
8183 return 1;
8184
8185 /* fall back to a CSR read, correct independently of DMA_RTAIL */
8186 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8187 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008188}
8189
8190/*
8191 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8192 * This routine will try to handle packets immediately (latency), but if
8193 * it finds too many, it will invoke the thread handler (bandwidth). The
Jubin John16733b82016-02-14 20:20:58 -08008194 * chip receive interrupt is *not* cleared down until this or the thread (if
Dean Luickf4f30031c2015-10-26 10:28:44 -04008195 * invoked) is finished. The intent is to avoid extra interrupts while we
8196 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008197 */
8198static irqreturn_t receive_context_interrupt(int irq, void *data)
8199{
8200 struct hfi1_ctxtdata *rcd = data;
8201 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008202 int disposition;
8203 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008204
8205 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8206 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008207 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008208
Dean Luickf4f30031c2015-10-26 10:28:44 -04008209 /* receive interrupt remains blocked while processing packets */
8210 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008211
Dean Luickf4f30031c2015-10-26 10:28:44 -04008212 /*
8213 * Too many packets were seen while processing packets in this
8214 * IRQ handler. Invoke the handler thread. The receive interrupt
8215 * remains blocked.
8216 */
8217 if (disposition == RCV_PKT_LIMIT)
8218 return IRQ_WAKE_THREAD;
8219
8220 /*
8221 * The packet processor detected no more packets. Clear the receive
8222 * interrupt and recheck for a packet that may have arrived
8223 * after the previous check and interrupt clear. If a packet arrived,
8224 * force another interrupt.
8225 */
8226 clear_recv_intr(rcd);
8227 present = check_packet_present(rcd);
8228 if (present)
8229 force_recv_intr(rcd);
8230
8231 return IRQ_HANDLED;
8232}
8233
8234/*
8235 * Receive packet thread handler. This expects to be invoked with the
8236 * receive interrupt still blocked.
8237 */
8238static irqreturn_t receive_context_thread(int irq, void *data)
8239{
8240 struct hfi1_ctxtdata *rcd = data;
8241 int present;
8242
8243 /* receive interrupt is still blocked from the IRQ handler */
8244 (void)rcd->do_interrupt(rcd, 1);
8245
8246 /*
8247 * The packet processor will only return if it detected no more
8248 * packets. Hold IRQs here so we can safely clear the interrupt and
8249 * recheck for a packet that may have arrived after the previous
8250 * check and the interrupt clear. If a packet arrived, force another
8251 * interrupt.
8252 */
8253 local_irq_disable();
8254 clear_recv_intr(rcd);
8255 present = check_packet_present(rcd);
8256 if (present)
8257 force_recv_intr(rcd);
8258 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008259
8260 return IRQ_HANDLED;
8261}
8262
8263/* ========================================================================= */
8264
8265u32 read_physical_state(struct hfi1_devdata *dd)
8266{
8267 u64 reg;
8268
8269 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8270 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8271 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8272}
8273
Jim Snowfb9036d2016-01-11 18:32:21 -05008274u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008275{
8276 u64 reg;
8277
8278 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8279 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8280 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8281}
8282
8283static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8284{
8285 u64 reg;
8286
8287 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8288 /* clear current state, set new state */
8289 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8290 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8291 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8292}
8293
8294/*
8295 * Use the 8051 to read a LCB CSR.
8296 */
8297static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8298{
8299 u32 regno;
8300 int ret;
8301
8302 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8303 if (acquire_lcb_access(dd, 0) == 0) {
8304 *data = read_csr(dd, addr);
8305 release_lcb_access(dd, 0);
8306 return 0;
8307 }
8308 return -EBUSY;
8309 }
8310
8311 /* register is an index of LCB registers: (offset - base) / 8 */
8312 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8313 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8314 if (ret != HCMD_SUCCESS)
8315 return -EBUSY;
8316 return 0;
8317}
8318
8319/*
8320 * Read an LCB CSR. Access may not be in host control, so check.
8321 * Return 0 on success, -EBUSY on failure.
8322 */
8323int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8324{
8325 struct hfi1_pportdata *ppd = dd->pport;
8326
8327 /* if up, go through the 8051 for the value */
8328 if (ppd->host_link_state & HLS_UP)
8329 return read_lcb_via_8051(dd, addr, data);
8330 /* if going up or down, no access */
8331 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8332 return -EBUSY;
8333 /* otherwise, host has access */
8334 *data = read_csr(dd, addr);
8335 return 0;
8336}
8337
8338/*
8339 * Use the 8051 to write a LCB CSR.
8340 */
8341static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8342{
Dean Luick3bf40d62015-11-06 20:07:04 -05008343 u32 regno;
8344 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008345
Dean Luick3bf40d62015-11-06 20:07:04 -05008346 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8347 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8348 if (acquire_lcb_access(dd, 0) == 0) {
8349 write_csr(dd, addr, data);
8350 release_lcb_access(dd, 0);
8351 return 0;
8352 }
8353 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008354 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008355
8356 /* register is an index of LCB registers: (offset - base) / 8 */
8357 regno = (addr - DC_LCB_CFG_RUN) >> 3;
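 /*
  * For HCMD_WRITE_LCB_CSR, do_8051_command() treats the out_data
  * argument as the 64-bit value to write and distributes it across the
  * command data and DC8051_CFG_EXT_DEV_0 fields (see that routine below).
  */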
8358 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8359 if (ret != HCMD_SUCCESS)
8360 return -EBUSY;
8361 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008362}
8363
8364/*
8365 * Write an LCB CSR. Access may not be in host control, so check.
8366 * Return 0 on success, -EBUSY on failure.
8367 */
8368int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8369{
8370 struct hfi1_pportdata *ppd = dd->pport;
8371
8372 /* if up, go through the 8051 for the value */
8373 if (ppd->host_link_state & HLS_UP)
8374 return write_lcb_via_8051(dd, addr, data);
8375 /* if going up or down, no access */
8376 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8377 return -EBUSY;
8378 /* otherwise, host has access */
8379 write_csr(dd, addr, data);
8380 return 0;
8381}
8382
8383/*
8384 * Returns:
8385 * < 0 = Linux error, not able to get access
8386 * > 0 = 8051 command RETURN_CODE
8387 */
8388static int do_8051_command(
8389 struct hfi1_devdata *dd,
8390 u32 type,
8391 u64 in_data,
8392 u64 *out_data)
8393{
8394 u64 reg, completed;
8395 int return_code;
8396 unsigned long flags;
8397 unsigned long timeout;
8398
8399 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8400
8401 /*
8402 * Alternative to holding the lock for a long time:
8403 * - keep busy wait - have other users bounce off
8404 */
8405 spin_lock_irqsave(&dd->dc8051_lock, flags);
8406
8407 /* We can't send any commands to the 8051 if it's in reset */
8408 if (dd->dc_shutdown) {
8409 return_code = -ENODEV;
8410 goto fail;
8411 }
8412
8413 /*
8414 * If an 8051 host command timed out previously, then the 8051 is
8415 * stuck.
8416 *
8417 * On first timeout, attempt to reset and restart the entire DC
8418 * block (including 8051). (Is this too big of a hammer?)
8419 *
8420 * If the 8051 times out a second time, the reset did not bring it
8421 * back to healthy life. In that case, fail any subsequent commands.
8422 */
8423 if (dd->dc8051_timed_out) {
8424 if (dd->dc8051_timed_out > 1) {
8425 dd_dev_err(dd,
8426 "Previous 8051 host command timed out, skipping command %u\n",
8427 type);
8428 return_code = -ENXIO;
8429 goto fail;
8430 }
8431 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8432 dc_shutdown(dd);
8433 dc_start(dd);
8434 spin_lock_irqsave(&dd->dc8051_lock, flags);
8435 }
8436
8437 /*
8438 * If there is no timeout, then the 8051 command interface is
8439 * waiting for a command.
8440 */
8441
8442 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008443 * When writing an LCB CSR, out_data contains the full value
8444 * to be written, while in_data contains the relative LCB
8445 * address in 7:0. Do the work here, rather than the caller,
8446 * of distributing the write data to where it needs to go:
8447 *
8448 * Write data
8449 * 39:00 -> in_data[47:8]
8450 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8451 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8452 */
8453 if (type == HCMD_WRITE_LCB_CSR) {
8454 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8455 reg = ((((*out_data) >> 40) & 0xff) <<
8456 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8457 | ((((*out_data) >> 48) & 0xffff) <<
8458 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8459 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8460 }
8461
8462 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008463 * Do two writes: the first to stabilize the type and req_data, the
8464 * second to activate.
8465 */
8466 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8467 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8468 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8469 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8470 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8471 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8472 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8473
8474 /* wait for completion, alternate: interrupt */
8475 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8476 while (1) {
8477 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8478 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8479 if (completed)
8480 break;
8481 if (time_after(jiffies, timeout)) {
8482 dd->dc8051_timed_out++;
8483 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8484 if (out_data)
8485 *out_data = 0;
8486 return_code = -ETIMEDOUT;
8487 goto fail;
8488 }
8489 udelay(2);
8490 }
8491
8492 if (out_data) {
8493 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8494 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8495 if (type == HCMD_READ_LCB_CSR) {
8496 /* top 16 bits are in a different register */
8497 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8498 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8499 << (48
8500 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8501 }
8502 }
8503 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8504 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8505 dd->dc8051_timed_out = 0;
8506 /*
8507 * Clear command for next user.
8508 */
8509 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8510
8511fail:
8512 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8513
8514 return return_code;
8515}
8516
8517static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8518{
8519 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8520}
8521
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008522int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8523 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008524{
8525 u64 data;
8526 int ret;
8527
8528 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8529 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8530 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8531 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8532 if (ret != HCMD_SUCCESS) {
8533 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008534 "load 8051 config: field id %d, lane %d, err %d\n",
8535 (int)field_id, (int)lane_id, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008536 }
8537 return ret;
8538}
8539
8540/*
8541 * Read the 8051 firmware "registers". Use the RAM directly. Always
8542 * set the result, even on error.
8543 * Return 0 on success, -errno on failure
8544 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008545int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8546 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008547{
8548 u64 big_data;
8549 u32 addr;
8550 int ret;
8551
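 /*
  * Lanes 0-3 index the per-lane field area that follows the general
  * field area; any other lane_id (e.g. GENERAL_CONFIG) falls through
  * to the general fields at the start of the 8051 RAM.
  */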
8552 /* address start depends on the lane_id */
8553 if (lane_id < 4)
8554 addr = (4 * NUM_GENERAL_FIELDS)
8555 + (lane_id * 4 * NUM_LANE_FIELDS);
8556 else
8557 addr = 0;
8558 addr += field_id * 4;
8559
8560 /* read is in 8-byte chunks, hardware will truncate the address down */
8561 ret = read_8051_data(dd, addr, 8, &big_data);
8562
8563 if (ret == 0) {
8564 /* extract the 4 bytes we want */
8565 if (addr & 0x4)
8566 *result = (u32)(big_data >> 32);
8567 else
8568 *result = (u32)big_data;
8569 } else {
8570 *result = 0;
8571 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008572 __func__, lane_id, field_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008573 }
8574
8575 return ret;
8576}
8577
8578static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8579 u8 continuous)
8580{
8581 u32 frame;
8582
8583 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8584 | power_management << POWER_MANAGEMENT_SHIFT;
8585 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8586 GENERAL_CONFIG, frame);
8587}
8588
8589static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8590 u16 vl15buf, u8 crc_sizes)
8591{
8592 u32 frame;
8593
8594 frame = (u32)vau << VAU_SHIFT
8595 | (u32)z << Z_SHIFT
8596 | (u32)vcu << VCU_SHIFT
8597 | (u32)vl15buf << VL15BUF_SHIFT
8598 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8599 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8600 GENERAL_CONFIG, frame);
8601}
8602
8603static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8604 u8 *flag_bits, u16 *link_widths)
8605{
8606 u32 frame;
8607
8608 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008609 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008610 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8611 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8612 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8613}
8614
8615static int write_vc_local_link_width(struct hfi1_devdata *dd,
8616 u8 misc_bits,
8617 u8 flag_bits,
8618 u16 link_widths)
8619{
8620 u32 frame;
8621
8622 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8623 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8624 | (u32)link_widths << LINK_WIDTH_SHIFT;
8625 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8626 frame);
8627}
8628
8629static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8630 u8 device_rev)
8631{
8632 u32 frame;
8633
8634 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8635 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8636 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8637}
8638
8639static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8640 u8 *device_rev)
8641{
8642 u32 frame;
8643
8644 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8645 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8646 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8647 & REMOTE_DEVICE_REV_MASK;
8648}
8649
8650void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8651{
8652 u32 frame;
8653
8654 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8655 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8656 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8657}
8658
8659static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8660 u8 *continuous)
8661{
8662 u32 frame;
8663
8664 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8665 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8666 & POWER_MANAGEMENT_MASK;
8667 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8668 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8669}
8670
8671static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8672 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8673{
8674 u32 frame;
8675
8676 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8677 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8678 *z = (frame >> Z_SHIFT) & Z_MASK;
8679 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8680 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8681 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8682}
8683
8684static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8685 u8 *remote_tx_rate,
8686 u16 *link_widths)
8687{
8688 u32 frame;
8689
8690 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008691 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008692 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8693 & REMOTE_TX_RATE_MASK;
8694 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8695}
8696
8697static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8698{
8699 u32 frame;
8700
8701 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8702 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8703}
8704
8705static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8706{
8707 u32 frame;
8708
8709 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8710 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8711}
8712
8713static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8714{
8715 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8716}
8717
8718static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8719{
8720 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8721}
8722
8723void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8724{
8725 u32 frame;
8726 int ret;
8727
8728 *link_quality = 0;
8729 if (dd->pport->host_link_state & HLS_UP) {
8730 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008731 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008732 if (ret == 0)
8733 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8734 & LINK_QUALITY_MASK;
8735 }
8736}
8737
8738static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8739{
8740 u32 frame;
8741
8742 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8743 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8744}
8745
Dean Luickfeb831d2016-04-14 08:31:36 -07008746static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8747{
8748 u32 frame;
8749
8750 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8751 *ldr = (frame & 0xff);
8752}
8753
Mike Marciniszyn77241052015-07-30 15:17:43 -04008754static int read_tx_settings(struct hfi1_devdata *dd,
8755 u8 *enable_lane_tx,
8756 u8 *tx_polarity_inversion,
8757 u8 *rx_polarity_inversion,
8758 u8 *max_rate)
8759{
8760 u32 frame;
8761 int ret;
8762
8763 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8764 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8765 & ENABLE_LANE_TX_MASK;
8766 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8767 & TX_POLARITY_INVERSION_MASK;
8768 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8769 & RX_POLARITY_INVERSION_MASK;
8770 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8771 return ret;
8772}
8773
8774static int write_tx_settings(struct hfi1_devdata *dd,
8775 u8 enable_lane_tx,
8776 u8 tx_polarity_inversion,
8777 u8 rx_polarity_inversion,
8778 u8 max_rate)
8779{
8780 u32 frame;
8781
8782 /* no need to mask, all variable sizes match field widths */
8783 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8784 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8785 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8786 | max_rate << MAX_RATE_SHIFT;
8787 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8788}
8789
8790static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8791{
8792 u32 frame, version, prod_id;
8793 int ret, lane;
8794
8795 /* 4 lanes */
8796 for (lane = 0; lane < 4; lane++) {
8797 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8798 if (ret) {
Jubin John17fb4f22016-02-14 20:21:52 -08008799 dd_dev_err(dd,
8800 "Unable to read lane %d firmware details\n",
8801 lane);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008802 continue;
8803 }
8804 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8805 & SPICO_ROM_VERSION_MASK;
8806 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8807 & SPICO_ROM_PROD_ID_MASK;
8808 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008809 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8810 lane, version, prod_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008811 }
8812}
8813
8814/*
8815 * Read an idle LCB message.
8816 *
8817 * Returns 0 on success, -EINVAL on error
8818 */
8819static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8820{
8821 int ret;
8822
Jubin John17fb4f22016-02-14 20:21:52 -08008823 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008824 if (ret != HCMD_SUCCESS) {
8825 dd_dev_err(dd, "read idle message: type %d, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008826 (u32)type, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008827 return -EINVAL;
8828 }
8829 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8830 /* return only the payload as we already know the type */
8831 *data_out >>= IDLE_PAYLOAD_SHIFT;
8832 return 0;
8833}
8834
8835/*
8836 * Read an idle SMA message. To be done in response to a notification from
8837 * the 8051.
8838 *
8839 * Returns 0 on success, -EINVAL on error
8840 */
8841static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8842{
Jubin John17fb4f22016-02-14 20:21:52 -08008843 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8844 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008845}
8846
8847/*
8848 * Send an idle LCB message.
8849 *
8850 * Returns 0 on success, -EINVAL on error
8851 */
8852static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8853{
8854 int ret;
8855
8856 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8857 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8858 if (ret != HCMD_SUCCESS) {
8859 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008860 data, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008861 return -EINVAL;
8862 }
8863 return 0;
8864}
8865
8866/*
8867 * Send an idle SMA message.
8868 *
8869 * Returns 0 on success, -EINVAL on error
8870 */
8871int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8872{
8873 u64 data;
8874
Jubin John17fb4f22016-02-14 20:21:52 -08008875 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8876 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008877 return send_idle_message(dd, data);
8878}
8879
8880/*
8881 * Initialize the LCB then do a quick link up. This may or may not be
8882 * in loopback.
8883 *
8884 * return 0 on success, -errno on error
8885 */
8886static int do_quick_linkup(struct hfi1_devdata *dd)
8887{
8888 u64 reg;
8889 unsigned long timeout;
8890 int ret;
8891
8892 lcb_shutdown(dd, 0);
8893
8894 if (loopback) {
8895 /* LCB_CFG_LOOPBACK.VAL = 2 */
8896 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8897 write_csr(dd, DC_LCB_CFG_LOOPBACK,
Jubin John17fb4f22016-02-14 20:21:52 -08008898 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008899 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8900 }
8901
8902 /* start the LCBs */
8903 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8904 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8905
8906 /* simulator only loopback steps */
8907 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8908 /* LCB_CFG_RUN.EN = 1 */
8909 write_csr(dd, DC_LCB_CFG_RUN,
Jubin John17fb4f22016-02-14 20:21:52 -08008910 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008911
8912 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8913 timeout = jiffies + msecs_to_jiffies(10);
8914 while (1) {
Jubin John17fb4f22016-02-14 20:21:52 -08008915 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008916 if (reg)
8917 break;
8918 if (time_after(jiffies, timeout)) {
8919 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008920 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008921 return -ETIMEDOUT;
8922 }
8923 udelay(2);
8924 }
8925
8926 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
Jubin John17fb4f22016-02-14 20:21:52 -08008927 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008928 }
8929
8930 if (!loopback) {
8931 /*
8932 * When doing quick linkup and not in loopback, both
8933 * sides must be done with LCB set-up before either
8934 * starts the quick linkup. Put a delay here so that
8935 * both sides can be started and have a chance to be
8936 * done with LCB set up before resuming.
8937 */
8938 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008939 "Pausing for peer to be finished with LCB set up\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008940 msleep(5000);
Jubin John17fb4f22016-02-14 20:21:52 -08008941 dd_dev_err(dd, "Continuing with quick linkup\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008942 }
8943
8944 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8945 set_8051_lcb_access(dd);
8946
8947 /*
8948 * State "quick" LinkUp request sets the physical link state to
8949 * LinkUp without a verify capability sequence.
8950 * This state is in simulator v37 and later.
8951 */
8952 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8953 if (ret != HCMD_SUCCESS) {
8954 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008955 "%s: set physical link state to quick LinkUp failed with return %d\n",
8956 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008957
8958 set_host_lcb_access(dd);
8959 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8960
8961 if (ret >= 0)
8962 ret = -EINVAL;
8963 return ret;
8964 }
8965
8966 return 0; /* success */
8967}
8968
8969/*
8970 * Set the SerDes to internal loopback mode.
8971 * Returns 0 on success, -errno on error.
8972 */
8973static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8974{
8975 int ret;
8976
8977 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8978 if (ret == HCMD_SUCCESS)
8979 return 0;
8980 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008981 "Set physical link state to SerDes Loopback failed with return %d\n",
8982 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008983 if (ret >= 0)
8984 ret = -EINVAL;
8985 return ret;
8986}
8987
8988/*
8989 * Do all special steps to set up loopback.
8990 */
8991static int init_loopback(struct hfi1_devdata *dd)
8992{
8993 dd_dev_info(dd, "Entering loopback mode\n");
8994
8995 /* all loopbacks should disable self GUID check */
8996 write_csr(dd, DC_DC8051_CFG_MODE,
Jubin John17fb4f22016-02-14 20:21:52 -08008997 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008998
8999 /*
9000 * The simulator has only one loopback option - LCB. Switch
9001 * to that option, which includes quick link up.
9002 *
9003 * Accept all valid loopback values.
9004 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08009005 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9006 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9007 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009008 loopback = LOOPBACK_LCB;
9009 quick_linkup = 1;
9010 return 0;
9011 }
9012
9013 /* handle serdes loopback */
9014 if (loopback == LOOPBACK_SERDES) {
9015 /* internal serdes loopback needs quick linkup on RTL */
9016 if (dd->icode == ICODE_RTL_SILICON)
9017 quick_linkup = 1;
9018 return set_serdes_loopback_mode(dd);
9019 }
9020
9021 /* LCB loopback - handled at poll time */
9022 if (loopback == LOOPBACK_LCB) {
9023 quick_linkup = 1; /* LCB is always quick linkup */
9024
9025 /* not supported in emulation due to emulation RTL changes */
9026 if (dd->icode == ICODE_FPGA_EMULATION) {
9027 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009028 "LCB loopback not supported in emulation\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009029 return -EINVAL;
9030 }
9031 return 0;
9032 }
9033
9034 /* external cable loopback requires no extra steps */
9035 if (loopback == LOOPBACK_CABLE)
9036 return 0;
9037
9038 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9039 return -EINVAL;
9040}
9041
9042/*
9043 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9044 * used in the Verify Capability link width attribute.
9045 */
9046static u16 opa_to_vc_link_widths(u16 opa_widths)
9047{
9048 int i;
9049 u16 result = 0;
9050
9051 static const struct link_bits {
9052 u16 from;
9053 u16 to;
9054 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08009055 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9056 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9057 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9058 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04009059 };
9060
9061 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9062 if (opa_widths & opa_link_xlate[i].from)
9063 result |= opa_link_xlate[i].to;
9064 }
9065 return result;
9066}
9067
9068/*
9069 * Set link attributes before moving to polling.
9070 */
9071static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9072{
9073 struct hfi1_devdata *dd = ppd->dd;
9074 u8 enable_lane_tx;
9075 u8 tx_polarity_inversion;
9076 u8 rx_polarity_inversion;
9077 int ret;
9078
9079 /* reset our fabric serdes to clear any lingering problems */
9080 fabric_serdes_reset(dd);
9081
9082 /* set the local tx rate - need to read-modify-write */
9083 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009084 &rx_polarity_inversion, &ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009085 if (ret)
9086 goto set_local_link_attributes_fail;
9087
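 /*
  * Older 8051 firmware (< 0.20) takes a single rate selection
  * (1 when 25G is enabled, otherwise 0); newer firmware takes a bit
  * mask of all enabled rates (bit 1 = 25G, bit 0 = 12.5G).
  */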
9088 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9089 /* set the tx rate to the fastest enabled */
9090 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9091 ppd->local_tx_rate = 1;
9092 else
9093 ppd->local_tx_rate = 0;
9094 } else {
9095 /* set the tx rate to all enabled */
9096 ppd->local_tx_rate = 0;
9097 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9098 ppd->local_tx_rate |= 2;
9099 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9100 ppd->local_tx_rate |= 1;
9101 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04009102
9103 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009104 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009105 rx_polarity_inversion, ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009106 if (ret != HCMD_SUCCESS)
9107 goto set_local_link_attributes_fail;
9108
9109 /*
9110 * DC supports continuous updates.
9111 */
Jubin John17fb4f22016-02-14 20:21:52 -08009112 ret = write_vc_local_phy(dd,
9113 0 /* no power management */,
9114 1 /* continuous updates */);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009115 if (ret != HCMD_SUCCESS)
9116 goto set_local_link_attributes_fail;
9117
9118 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9119 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9120 ppd->port_crc_mode_enabled);
9121 if (ret != HCMD_SUCCESS)
9122 goto set_local_link_attributes_fail;
9123
9124 ret = write_vc_local_link_width(dd, 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009125 opa_to_vc_link_widths(
9126 ppd->link_width_enabled));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009127 if (ret != HCMD_SUCCESS)
9128 goto set_local_link_attributes_fail;
9129
9130 /* let peer know who we are */
9131 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9132 if (ret == HCMD_SUCCESS)
9133 return 0;
9134
9135set_local_link_attributes_fail:
9136 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009137 "Failed to set local link attributes, return 0x%x\n",
9138 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009139 return ret;
9140}
9141
9142/*
Easwar Hariharan623bba22016-04-12 11:25:57 -07009143 * Call this to start the link.
9144 * Do not do anything if the link is disabled.
9145 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009146 */
9147int start_link(struct hfi1_pportdata *ppd)
9148{
9149 if (!ppd->link_enabled) {
9150 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009151 "%s: stopping link start because link is disabled\n",
9152 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009153 return 0;
9154 }
9155 if (!ppd->driver_link_ready) {
9156 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009157 "%s: stopping link start because driver is not ready\n",
9158 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009159 return 0;
9160 }
9161
Easwar Hariharan623bba22016-04-12 11:25:57 -07009162 return set_link_state(ppd, HLS_DN_POLL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009163}
9164
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009165static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9166{
9167 struct hfi1_devdata *dd = ppd->dd;
9168 u64 mask;
9169 unsigned long timeout;
9170
9171 /*
9172 * Check for QSFP interrupt for t_init (SFF 8679)
9173 */
9174 timeout = jiffies + msecs_to_jiffies(2000);
9175 while (1) {
9176 mask = read_csr(dd, dd->hfi1_id ?
9177 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9178 if (!(mask & QSFP_HFI0_INT_N)) {
9179 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9180 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9181 break;
9182 }
9183 if (time_after(jiffies, timeout)) {
9184 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9185 __func__);
9186 break;
9187 }
9188 udelay(2);
9189 }
9190}
9191
9192static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9193{
9194 struct hfi1_devdata *dd = ppd->dd;
9195 u64 mask;
9196
9197 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9198 if (enable)
9199 mask |= (u64)QSFP_HFI0_INT_N;
9200 else
9201 mask &= ~(u64)QSFP_HFI0_INT_N;
9202 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9203}
9204
9205void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009206{
9207 struct hfi1_devdata *dd = ppd->dd;
9208 u64 mask, qsfp_mask;
9209
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009210 /* Disable INT_N from triggering QSFP interrupts */
9211 set_qsfp_int_n(ppd, 0);
9212
9213 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009214 mask = (u64)QSFP_HFI0_RESET_N;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009215
9216 qsfp_mask = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009217 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009218 qsfp_mask &= ~mask;
9219 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009220 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009221
9222 udelay(10);
9223
9224 qsfp_mask |= mask;
9225 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009226 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009227
9228 wait_for_qsfp_init(ppd);
9229
9230 /*
9231 * Allow INT_N to trigger the QSFP interrupt to watch
9232 * for alarms and warnings
9233 */
9234 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009235}
9236
9237static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9238 u8 *qsfp_interrupt_status)
9239{
9240 struct hfi1_devdata *dd = ppd->dd;
9241
9242 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009243 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9244		dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
9245 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009246
9247 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009248 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9249 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9250 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009251
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009252 /*
9253 * The remaining alarms/warnings don't matter if the link is down.
9254 */
9255 if (ppd->host_link_state & HLS_DOWN)
9256 return 0;
9257
Mike Marciniszyn77241052015-07-30 15:17:43 -04009258 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009259 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9260 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9261 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009262
9263 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009264 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9265 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9266 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009267
9268 /* Byte 2 is vendor specific */
9269
9270 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009271 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9272 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9273 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009274
9275 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009276 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9277 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9278 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009279
9280 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009281 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9282 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9283 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009284
9285 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009286 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9287 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9288 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009289
9290 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009291 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9292 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9293 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009294
9295 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009296 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9297 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9298 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009299
9300 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009301 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9302 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9303 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009304
9305 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009306 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9307 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9308 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009309
9310 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009311 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9312 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9313 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009314
9315 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009316 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9317 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9318 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009319
9320 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009321 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9322 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9323 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009324
9325 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009326 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9327 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9328 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009329
9330 /* Bytes 9-10 and 11-12 are reserved */
9331 /* Bytes 13-15 are vendor specific */
9332
9333 return 0;
9334}
9335
Easwar Hariharan623bba22016-04-12 11:25:57 -07009336/* This routine will only be scheduled if the QSFP module-present signal is asserted */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009337void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009338{
9339 struct qsfp_data *qd;
9340 struct hfi1_pportdata *ppd;
9341 struct hfi1_devdata *dd;
9342
9343 qd = container_of(work, struct qsfp_data, qsfp_work);
9344 ppd = qd->ppd;
9345 dd = ppd->dd;
9346
9347 /* Sanity check */
9348 if (!qsfp_mod_present(ppd))
9349 return;
9350
9351 /*
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009352 * Turn DC back on after cable has been re-inserted. Up until
9353 * now, the DC has been in reset to save power.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009354 */
9355 dc_start(dd);
9356
9357 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009358 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009359
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009360 wait_for_qsfp_init(ppd);
9361
9362 /*
9363 * Allow INT_N to trigger the QSFP interrupt to watch
9364 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009365 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009366 set_qsfp_int_n(ppd, 1);
9367
9368 tune_serdes(ppd);
9369
9370 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009371 }
9372
9373 if (qd->check_interrupt_flags) {
9374 u8 qsfp_interrupt_status[16] = {0,};
9375
Dean Luick765a6fa2016-03-05 08:50:06 -08009376 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9377 &qsfp_interrupt_status[0], 16) != 16) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009378 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009379 "%s: Failed to read status of QSFP module\n",
9380 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009381 } else {
9382 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009383
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009384 handle_qsfp_error_conditions(
9385 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009386 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9387 ppd->qsfp_info.check_interrupt_flags = 0;
9388 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08009389 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009390 }
9391 }
9392}
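/*
 * Note on the flag read above: the 16 bytes fetched starting at QSFP
 * offset 6 are passed straight to handle_qsfp_error_conditions(), so
 * qsfp_interrupt_status[0] holds module byte 6, [1] holds byte 7, and so on.
 */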
9393
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009394static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009395{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009396 struct hfi1_pportdata *ppd = dd->pport;
9397 u64 qsfp_mask, cce_int_mask;
9398 const int qsfp1_int_smask = QSFP1_INT % 64;
9399 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009400
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009401 /*
9402 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9403 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9404 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9405 * the index of the appropriate CSR in the CCEIntMask CSR array
9406 */
9407 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9408 (8 * (QSFP1_INT / 64)));
9409 if (dd->hfi1_id) {
9410 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9411 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9412 cce_int_mask);
9413 } else {
9414 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9415 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9416 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009417 }
9418
Mike Marciniszyn77241052015-07-30 15:17:43 -04009419 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9420 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009421 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9422 qsfp_mask);
9423 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9424 qsfp_mask);
9425
9426 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009427
9428 /* Handle active low nature of INT_N and MODPRST_N pins */
9429 if (qsfp_mod_present(ppd))
9430 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9431 write_csr(dd,
9432 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9433 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009434}
9435
Dean Luickbbdeb332015-12-01 15:38:15 -05009436/*
9437 * Do a one-time initialize of the LCB block.
9438 */
9439static void init_lcb(struct hfi1_devdata *dd)
9440{
Dean Luicka59329d2016-02-03 14:32:31 -08009441 /* simulator does not correctly handle LCB cclk loopback, skip */
9442 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9443 return;
9444
Dean Luickbbdeb332015-12-01 15:38:15 -05009445 /* the DC has been reset earlier in the driver load */
9446
9447 /* set LCB for cclk loopback on the port */
9448 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9449 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9450 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9451 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9452 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9453 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9454 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9455}
9456
Mike Marciniszyn77241052015-07-30 15:17:43 -04009457int bringup_serdes(struct hfi1_pportdata *ppd)
9458{
9459 struct hfi1_devdata *dd = ppd->dd;
9460 u64 guid;
9461 int ret;
9462
9463 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9464 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9465
9466 guid = ppd->guid;
9467 if (!guid) {
9468 if (dd->base_guid)
9469 guid = dd->base_guid + ppd->port - 1;
9470 ppd->guid = guid;
9471 }
9472
Mike Marciniszyn77241052015-07-30 15:17:43 -04009473 /* Set linkinit_reason on power up per OPA spec */
9474 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9475
Dean Luickbbdeb332015-12-01 15:38:15 -05009476 /* one-time init of the LCB */
9477 init_lcb(dd);
9478
Mike Marciniszyn77241052015-07-30 15:17:43 -04009479 if (loopback) {
9480 ret = init_loopback(dd);
9481 if (ret < 0)
9482 return ret;
9483 }
9484
Easwar Hariharan9775a992016-05-12 10:22:39 -07009485 get_port_type(ppd);
9486 if (ppd->port_type == PORT_TYPE_QSFP) {
9487 set_qsfp_int_n(ppd, 0);
9488 wait_for_qsfp_init(ppd);
9489 set_qsfp_int_n(ppd, 1);
9490 }
9491
9492 /*
9493 * Tune the SerDes to a ballpark setting for
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009494 * optimal signal and bit error rate
9495 * Needs to be done before starting the link
9496 */
9497 tune_serdes(ppd);
9498
Mike Marciniszyn77241052015-07-30 15:17:43 -04009499 return start_link(ppd);
9500}
9501
9502void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9503{
9504 struct hfi1_devdata *dd = ppd->dd;
9505
9506 /*
9507	 * Shut down the link and keep it down. First, clear the flag that says
9508	 * the driver wants to allow the link to be up (driver_link_ready).
9509 * Then make sure the link is not automatically restarted
9510 * (link_enabled). Cancel any pending restart. And finally
9511 * go offline.
9512 */
9513 ppd->driver_link_ready = 0;
9514 ppd->link_enabled = 0;
9515
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009516 ppd->offline_disabled_reason =
9517 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009518 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009519 OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009520 set_link_state(ppd, HLS_DN_OFFLINE);
9521
9522 /* disable the port */
9523 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9524}
9525
9526static inline int init_cpu_counters(struct hfi1_devdata *dd)
9527{
9528 struct hfi1_pportdata *ppd;
9529 int i;
9530
9531 ppd = (struct hfi1_pportdata *)(dd + 1);
9532 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009533 ppd->ibport_data.rvp.rc_acks = NULL;
9534 ppd->ibport_data.rvp.rc_qacks = NULL;
9535 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9536 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9537 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9538 if (!ppd->ibport_data.rvp.rc_acks ||
9539 !ppd->ibport_data.rvp.rc_delayed_comp ||
9540 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009541 return -ENOMEM;
9542 }
9543
9544 return 0;
9545}
9546
9547static const char * const pt_names[] = {
9548 "expected",
9549 "eager",
9550 "invalid"
9551};
9552
9553static const char *pt_name(u32 type)
9554{
9555 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9556}
9557
9558/*
9559 * index is the index into the receive array
9560 */
9561void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9562 u32 type, unsigned long pa, u16 order)
9563{
9564 u64 reg;
9565 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9566 (dd->kregbase + RCV_ARRAY));
9567
9568 if (!(dd->flags & HFI1_PRESENT))
9569 goto done;
9570
9571 if (type == PT_INVALID) {
9572 pa = 0;
9573 } else if (type > PT_INVALID) {
9574 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009575 "unexpected receive array type %u for index %u, not handled\n",
9576 type, index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009577 goto done;
9578 }
9579
9580 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9581 pt_name(type), index, pa, (unsigned long)order);
9582
9583#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9584 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9585 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9586 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9587 << RCV_ARRAY_RT_ADDR_SHIFT;
9588 writeq(reg, base + (index * 8));
9589
9590 if (type == PT_EAGER)
9591 /*
9592 * Eager entries are written one-by-one so we have to push them
9593 * after we write the entry.
9594 */
9595 flush_wc();
9596done:
9597 return;
9598}
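/*
 * Worked example of the encoding above (illustrative values): a buffer at
 * physical address 0x12345000 is stored as (0x12345000 >> RT_ADDR_SHIFT),
 * i.e. in 4 KB units, with the caller-supplied order placed in the
 * BUF_SIZE field and the write-enable bit set.
 */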
9599
9600void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9601{
9602 struct hfi1_devdata *dd = rcd->dd;
9603 u32 i;
9604
9605 /* this could be optimized */
9606 for (i = rcd->eager_base; i < rcd->eager_base +
9607 rcd->egrbufs.alloced; i++)
9608 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9609
9610 for (i = rcd->expected_base;
9611 i < rcd->expected_base + rcd->expected_count; i++)
9612 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9613}
9614
9615int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9616 struct hfi1_ctxt_info *kinfo)
9617{
9618 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9619 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9620 return 0;
9621}
9622
9623struct hfi1_message_header *hfi1_get_msgheader(
9624 struct hfi1_devdata *dd, __le32 *rhf_addr)
9625{
9626 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9627
9628 return (struct hfi1_message_header *)
9629 (rhf_addr - dd->rhf_offset + offset);
9630}
9631
9632static const char * const ib_cfg_name_strings[] = {
9633 "HFI1_IB_CFG_LIDLMC",
9634 "HFI1_IB_CFG_LWID_DG_ENB",
9635 "HFI1_IB_CFG_LWID_ENB",
9636 "HFI1_IB_CFG_LWID",
9637 "HFI1_IB_CFG_SPD_ENB",
9638 "HFI1_IB_CFG_SPD",
9639 "HFI1_IB_CFG_RXPOL_ENB",
9640 "HFI1_IB_CFG_LREV_ENB",
9641 "HFI1_IB_CFG_LINKLATENCY",
9642 "HFI1_IB_CFG_HRTBT",
9643 "HFI1_IB_CFG_OP_VLS",
9644 "HFI1_IB_CFG_VL_HIGH_CAP",
9645 "HFI1_IB_CFG_VL_LOW_CAP",
9646 "HFI1_IB_CFG_OVERRUN_THRESH",
9647 "HFI1_IB_CFG_PHYERR_THRESH",
9648 "HFI1_IB_CFG_LINKDEFAULT",
9649 "HFI1_IB_CFG_PKEYS",
9650 "HFI1_IB_CFG_MTU",
9651 "HFI1_IB_CFG_LSTATE",
9652 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9653 "HFI1_IB_CFG_PMA_TICKS",
9654 "HFI1_IB_CFG_PORT"
9655};
9656
9657static const char *ib_cfg_name(int which)
9658{
9659 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9660 return "invalid";
9661 return ib_cfg_name_strings[which];
9662}
9663
9664int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9665{
9666 struct hfi1_devdata *dd = ppd->dd;
9667 int val = 0;
9668
9669 switch (which) {
9670 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9671 val = ppd->link_width_enabled;
9672 break;
9673 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9674 val = ppd->link_width_active;
9675 break;
9676 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9677 val = ppd->link_speed_enabled;
9678 break;
9679 case HFI1_IB_CFG_SPD: /* current Link speed */
9680 val = ppd->link_speed_active;
9681 break;
9682
9683 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9684 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9685 case HFI1_IB_CFG_LINKLATENCY:
9686 goto unimplemented;
9687
9688 case HFI1_IB_CFG_OP_VLS:
9689 val = ppd->vls_operational;
9690 break;
9691 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9692 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9693 break;
9694 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9695 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9696 break;
9697 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9698 val = ppd->overrun_threshold;
9699 break;
9700 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9701 val = ppd->phy_error_threshold;
9702 break;
9703 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9704 val = dd->link_default;
9705 break;
9706
9707 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9708 case HFI1_IB_CFG_PMA_TICKS:
9709 default:
9710unimplemented:
9711 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9712 dd_dev_info(
9713 dd,
9714 "%s: which %s: not implemented\n",
9715 __func__,
9716 ib_cfg_name(which));
9717 break;
9718 }
9719
9720 return val;
9721}
9722
9723/*
9724 * The largest MAD packet size.
9725 */
9726#define MAX_MAD_PACKET 2048
9727
9728/*
9729 * Return the maximum header bytes that can go on the _wire_
9730 * for this device. This count includes the ICRC which is
9731 * not part of the packet held in memory but is appended
9732 * by the HW.
9733 * This is dependent on the device's receive header entry size.
9734 * HFI allows this to be set per-receive context, but the
9735 * driver presently enforces a global value.
9736 */
9737u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9738{
9739 /*
9740 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9741 * the Receive Header Entry Size minus the PBC (or RHF) size
9742 * plus one DW for the ICRC appended by HW.
9743 *
9744 * dd->rcd[0].rcvhdrqentsize is in DW.
9745 * We use rcd[0] as all context will have the same value. Also,
9746 * the first kernel context would have been allocated by now so
9747 * we are guaranteed a valid value.
9748 */
9749 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9750}
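/*
 * Worked example (illustrative, not a hardware default): with a receive
 * header entry size of 32 DW, the routine above returns
 * (32 - 2 + 1) * 4 = 124 bytes as the largest on-the-wire header.
 */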
9751
9752/*
9753 * Set Send Length
9754 * @ppd - per port data
9755 *
9756 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9757 * registers compare against LRH.PktLen, so use the max bytes included
9758 * in the LRH.
9759 *
9760 * This routine changes all VL values except VL15, which it maintains at
9761 * the same value.
9762 */
9763static void set_send_length(struct hfi1_pportdata *ppd)
9764{
9765 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009766 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9767 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009768 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9769 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9770 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9771 int i;
Jianxin Xiong44306f12016-04-12 11:30:28 -07009772 u32 thres;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009773
9774 for (i = 0; i < ppd->vls_supported; i++) {
9775 if (dd->vld[i].mtu > maxvlmtu)
9776 maxvlmtu = dd->vld[i].mtu;
9777 if (i <= 3)
9778 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9779 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9780 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9781 else
9782 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9783 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9784 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9785 }
9786 write_csr(dd, SEND_LEN_CHECK0, len1);
9787 write_csr(dd, SEND_LEN_CHECK1, len2);
9788 /* adjust kernel credit return thresholds based on new MTUs */
9789 /* all kernel receive contexts have the same hdrqentsize */
9790 for (i = 0; i < ppd->vls_supported; i++) {
Jianxin Xiong44306f12016-04-12 11:30:28 -07009791 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9792 sc_mtu_to_threshold(dd->vld[i].sc,
9793 dd->vld[i].mtu,
Jubin John17fb4f22016-02-14 20:21:52 -08009794 dd->rcd[0]->rcvhdrqentsize));
Jianxin Xiong44306f12016-04-12 11:30:28 -07009795 sc_set_cr_threshold(dd->vld[i].sc, thres);
9796 }
9797 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9798 sc_mtu_to_threshold(dd->vld[15].sc,
9799 dd->vld[15].mtu,
9800 dd->rcd[0]->rcvhdrqentsize));
9801 sc_set_cr_threshold(dd->vld[15].sc, thres);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009802
9803 /* Adjust maximum MTU for the port in DC */
9804 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9805 (ilog2(maxvlmtu >> 8) + 1);
9806 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9807 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9808 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9809 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9810 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9811}
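/*
 * Example of the DC MTU cap encoding above (values assumed for
 * illustration): a maximum VL MTU of 4096 bytes gives
 * ilog2(4096 >> 8) + 1 = ilog2(16) + 1 = 5, while 10240 uses the
 * dedicated DCC_CFG_PORT_MTU_CAP_10240 value.
 */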
9812
9813static void set_lidlmc(struct hfi1_pportdata *ppd)
9814{
9815 int i;
9816 u64 sreg = 0;
9817 struct hfi1_devdata *dd = ppd->dd;
9818 u32 mask = ~((1U << ppd->lmc) - 1);
9819 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9820
9821 if (dd->hfi1_snoop.mode_flag)
9822 dd_dev_info(dd, "Set lid/lmc while snooping");
9823
9824 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9825 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9826 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
Jubin John8638b772016-02-14 20:19:24 -08009827 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
Mike Marciniszyn77241052015-07-30 15:17:43 -04009828 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9829 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9830 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9831
9832 /*
9833 * Iterate over all the send contexts and set their SLID check
9834 */
9835 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9836 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9837 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9838 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9839
9840 for (i = 0; i < dd->chip_send_contexts; i++) {
9841 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9842 i, (u32)sreg);
9843 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9844 }
9845
9846 /* Now we have to do the same thing for the sdma engines */
9847 sdma_update_lmc(dd, mask, ppd->lid);
9848}
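/*
 * LMC example for the mask computed above (illustrative): with lmc = 2 the
 * mask is ~0x3, so the low two bits of the LID are ignored by the DLID and
 * per-context SLID checks, giving the port four consecutive LIDs.
 */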
9849
9850static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9851{
9852 unsigned long timeout;
9853 u32 curr_state;
9854
9855 timeout = jiffies + msecs_to_jiffies(msecs);
9856 while (1) {
9857 curr_state = read_physical_state(dd);
9858 if (curr_state == state)
9859 break;
9860 if (time_after(jiffies, timeout)) {
9861 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009862 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9863 state, curr_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009864 return -ETIMEDOUT;
9865 }
9866 usleep_range(1950, 2050); /* sleep 2ms-ish */
9867 }
9868
9869 return 0;
9870}
9871
9872/*
9873 * Helper for set_link_state(). Do not call except from that routine.
9874 * Expects ppd->hls_mutex to be held.
9875 *
9876 * @rem_reason value to be sent to the neighbor
9877 *
9878 * LinkDownReasons only set if transition succeeds.
9879 */
9880static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9881{
9882 struct hfi1_devdata *dd = ppd->dd;
9883 u32 pstate, previous_state;
9884 u32 last_local_state;
9885 u32 last_remote_state;
9886 int ret;
9887 int do_transition;
9888 int do_wait;
9889
9890 previous_state = ppd->host_link_state;
9891 ppd->host_link_state = HLS_GOING_OFFLINE;
9892 pstate = read_physical_state(dd);
9893 if (pstate == PLS_OFFLINE) {
9894 do_transition = 0; /* in right state */
9895 do_wait = 0; /* ...no need to wait */
9896 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9897 do_transition = 0; /* in an offline transient state */
9898 do_wait = 1; /* ...wait for it to settle */
9899 } else {
9900 do_transition = 1; /* need to move to offline */
9901 do_wait = 1; /* ...will need to wait */
9902 }
9903
9904 if (do_transition) {
9905 ret = set_physical_link_state(dd,
Harish Chegondibf640092016-03-05 08:49:29 -08009906 (rem_reason << 8) | PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009907
9908 if (ret != HCMD_SUCCESS) {
9909 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009910 "Failed to transition to Offline link state, return %d\n",
9911 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009912 return -EINVAL;
9913 }
Bryan Morgana9c05e32016-02-03 14:30:49 -08009914 if (ppd->offline_disabled_reason ==
9915 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -04009916 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -08009917 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009918 }
9919
9920 if (do_wait) {
9921 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -04009922 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009923 if (ret < 0)
9924 return ret;
9925 }
9926
9927 /* make sure the logical state is also down */
9928 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9929
9930 /*
9931 * Now in charge of LCB - must be after the physical state is
9932 * offline.quiet and before host_link_state is changed.
9933 */
9934 set_host_lcb_access(dd);
9935 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9936 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9937
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009938 if (ppd->port_type == PORT_TYPE_QSFP &&
9939 ppd->qsfp_info.limiting_active &&
9940 qsfp_mod_present(ppd)) {
Dean Luick765a6fa2016-03-05 08:50:06 -08009941 int ret;
9942
9943 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
9944 if (ret == 0) {
9945 set_qsfp_tx(ppd, 0);
9946 release_chip_resource(dd, qsfp_resource(dd));
9947 } else {
9948 /* not fatal, but should warn */
9949 dd_dev_err(dd,
9950 "Unable to acquire lock to turn off QSFP TX\n");
9951 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009952 }
9953
Mike Marciniszyn77241052015-07-30 15:17:43 -04009954 /*
9955 * The LNI has a mandatory wait time after the physical state
9956 * moves to Offline.Quiet. The wait time may be different
9957 * depending on how the link went down. The 8051 firmware
9958 * will observe the needed wait time and only move to ready
9959 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -05009960 * is 6s, so wait that long and then at least 0.5s more for
9961 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009962 */
Dean Luick05087f3b2015-12-01 15:38:16 -05009963 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009964 if (ret) {
9965 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009966 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009967 /* state is really offline, so make it so */
9968 ppd->host_link_state = HLS_DN_OFFLINE;
9969 return ret;
9970 }
9971
9972 /*
9973 * The state is now offline and the 8051 is ready to accept host
9974 * requests.
9975 * - change our state
9976 * - notify others if we were previously in a linkup state
9977 */
9978 ppd->host_link_state = HLS_DN_OFFLINE;
9979 if (previous_state & HLS_UP) {
9980 /* went down while link was up */
9981 handle_linkup_change(dd, 0);
9982 } else if (previous_state
9983 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9984 /* went down while attempting link up */
9985 /* byte 1 of last_*_state is the failure reason */
9986 read_last_local_state(dd, &last_local_state);
9987 read_last_remote_state(dd, &last_remote_state);
9988 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009989 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9990 last_local_state, last_remote_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009991 }
9992
9993 /* the active link width (downgrade) is 0 on link down */
9994 ppd->link_width_active = 0;
9995 ppd->link_width_downgrade_tx_active = 0;
9996 ppd->link_width_downgrade_rx_active = 0;
9997 ppd->current_egress_rate = 0;
9998 return 0;
9999}
10000
10001/* return the link state name */
10002static const char *link_state_name(u32 state)
10003{
10004 const char *name;
10005 int n = ilog2(state);
10006 static const char * const names[] = {
10007 [__HLS_UP_INIT_BP] = "INIT",
10008 [__HLS_UP_ARMED_BP] = "ARMED",
10009 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10010 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10011 [__HLS_DN_POLL_BP] = "POLL",
10012 [__HLS_DN_DISABLE_BP] = "DISABLE",
10013 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10014 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10015 [__HLS_GOING_UP_BP] = "GOING_UP",
10016 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10017 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10018 };
10019
10020 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10021 return name ? name : "unknown";
10022}
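/*
 * The lookup above relies on the HLS_* values being one-hot bit masks:
 * ilog2() recovers the bit position, which indexes the __HLS_*_BP-based
 * name table.
 */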
10023
10024/* return the link state reason name */
10025static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10026{
10027 if (state == HLS_UP_INIT) {
10028 switch (ppd->linkinit_reason) {
10029 case OPA_LINKINIT_REASON_LINKUP:
10030 return "(LINKUP)";
10031 case OPA_LINKINIT_REASON_FLAPPING:
10032 return "(FLAPPING)";
10033 case OPA_LINKINIT_OUTSIDE_POLICY:
10034 return "(OUTSIDE_POLICY)";
10035 case OPA_LINKINIT_QUARANTINED:
10036 return "(QUARANTINED)";
10037 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10038 return "(INSUFIC_CAPABILITY)";
10039 default:
10040 break;
10041 }
10042 }
10043 return "";
10044}
10045
10046/*
10047 * driver_physical_state - convert the driver's notion of a port's
10048 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10049 * Return -1 (converted to a u32) to indicate error.
10050 */
10051u32 driver_physical_state(struct hfi1_pportdata *ppd)
10052{
10053 switch (ppd->host_link_state) {
10054 case HLS_UP_INIT:
10055 case HLS_UP_ARMED:
10056 case HLS_UP_ACTIVE:
10057 return IB_PORTPHYSSTATE_LINKUP;
10058 case HLS_DN_POLL:
10059 return IB_PORTPHYSSTATE_POLLING;
10060 case HLS_DN_DISABLE:
10061 return IB_PORTPHYSSTATE_DISABLED;
10062 case HLS_DN_OFFLINE:
10063 return OPA_PORTPHYSSTATE_OFFLINE;
10064 case HLS_VERIFY_CAP:
10065 return IB_PORTPHYSSTATE_POLLING;
10066 case HLS_GOING_UP:
10067 return IB_PORTPHYSSTATE_POLLING;
10068 case HLS_GOING_OFFLINE:
10069 return OPA_PORTPHYSSTATE_OFFLINE;
10070 case HLS_LINK_COOLDOWN:
10071 return OPA_PORTPHYSSTATE_OFFLINE;
10072 case HLS_DN_DOWNDEF:
10073 default:
10074 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10075 ppd->host_link_state);
10076 return -1;
10077 }
10078}
10079
10080/*
10081 * driver_logical_state - convert the driver's notion of a port's
10082 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10083 * (converted to a u32) to indicate error.
10084 */
10085u32 driver_logical_state(struct hfi1_pportdata *ppd)
10086{
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -070010087 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010088 return IB_PORT_DOWN;
10089
10090 switch (ppd->host_link_state & HLS_UP) {
10091 case HLS_UP_INIT:
10092 return IB_PORT_INIT;
10093 case HLS_UP_ARMED:
10094 return IB_PORT_ARMED;
10095 case HLS_UP_ACTIVE:
10096 return IB_PORT_ACTIVE;
10097 default:
10098 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10099 ppd->host_link_state);
10100 return -1;
10101 }
10102}
10103
10104void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10105 u8 neigh_reason, u8 rem_reason)
10106{
10107 if (ppd->local_link_down_reason.latest == 0 &&
10108 ppd->neigh_link_down_reason.latest == 0) {
10109 ppd->local_link_down_reason.latest = lcl_reason;
10110 ppd->neigh_link_down_reason.latest = neigh_reason;
10111 ppd->remote_link_down_reason = rem_reason;
10112 }
10113}
10114
10115/*
10116 * Change the physical and/or logical link state.
10117 *
10118 * Do not call this routine while inside an interrupt. It contains
10119 * calls to routines that can take multiple seconds to finish.
10120 *
10121 * Returns 0 on success, -errno on failure.
10122 */
10123int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10124{
10125 struct hfi1_devdata *dd = ppd->dd;
10126 struct ib_event event = {.device = NULL};
10127 int ret1, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010128 int orig_new_state, poll_bounce;
10129
10130 mutex_lock(&ppd->hls_lock);
10131
10132 orig_new_state = state;
10133 if (state == HLS_DN_DOWNDEF)
10134 state = dd->link_default;
10135
10136 /* interpret poll -> poll as a link bounce */
Jubin Johnd0d236e2016-02-14 20:20:15 -080010137 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10138 state == HLS_DN_POLL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010139
10140 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -080010141 link_state_name(ppd->host_link_state),
10142 link_state_name(orig_new_state),
10143 poll_bounce ? "(bounce) " : "",
10144 link_state_reason_name(ppd, state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010145
Mike Marciniszyn77241052015-07-30 15:17:43 -040010146 /*
10147 * If we're going to a (HLS_*) link state that implies the logical
10148 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10149 * reset is_sm_config_started to 0.
10150 */
10151 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10152 ppd->is_sm_config_started = 0;
10153
10154 /*
10155 * Do nothing if the states match. Let a poll to poll link bounce
10156 * go through.
10157 */
10158 if (ppd->host_link_state == state && !poll_bounce)
10159 goto done;
10160
10161 switch (state) {
10162 case HLS_UP_INIT:
Jubin Johnd0d236e2016-02-14 20:20:15 -080010163 if (ppd->host_link_state == HLS_DN_POLL &&
10164 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010165 /*
10166 * Quick link up jumps from polling to here.
10167 *
10168 * Whether in normal or loopback mode, the
10169 * simulator jumps from polling to link up.
10170 * Accept that here.
10171 */
Jubin John17fb4f22016-02-14 20:21:52 -080010172 /* OK */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010173 } else if (ppd->host_link_state != HLS_GOING_UP) {
10174 goto unexpected;
10175 }
10176
10177 ppd->host_link_state = HLS_UP_INIT;
10178 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10179 if (ret) {
10180 /* logical state didn't change, stay at going_up */
10181 ppd->host_link_state = HLS_GOING_UP;
10182 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010183 "%s: logical state did not change to INIT\n",
10184 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010185 } else {
10186 /* clear old transient LINKINIT_REASON code */
10187 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10188 ppd->linkinit_reason =
10189 OPA_LINKINIT_REASON_LINKUP;
10190
10191 /* enable the port */
10192 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10193
10194 handle_linkup_change(dd, 1);
10195 }
10196 break;
10197 case HLS_UP_ARMED:
10198 if (ppd->host_link_state != HLS_UP_INIT)
10199 goto unexpected;
10200
10201 ppd->host_link_state = HLS_UP_ARMED;
10202 set_logical_state(dd, LSTATE_ARMED);
10203 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10204 if (ret) {
10205 /* logical state didn't change, stay at init */
10206 ppd->host_link_state = HLS_UP_INIT;
10207 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010208 "%s: logical state did not change to ARMED\n",
10209 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010210 }
10211 /*
10212 * The simulator does not currently implement SMA messages,
10213 * so neighbor_normal is not set. Set it here when we first
10214 * move to Armed.
10215 */
10216 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10217 ppd->neighbor_normal = 1;
10218 break;
10219 case HLS_UP_ACTIVE:
10220 if (ppd->host_link_state != HLS_UP_ARMED)
10221 goto unexpected;
10222
10223 ppd->host_link_state = HLS_UP_ACTIVE;
10224 set_logical_state(dd, LSTATE_ACTIVE);
10225 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10226 if (ret) {
10227 /* logical state didn't change, stay at armed */
10228 ppd->host_link_state = HLS_UP_ARMED;
10229 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010230 "%s: logical state did not change to ACTIVE\n",
10231 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010232 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010233 /* tell all engines to go running */
10234 sdma_all_running(dd);
10235
10236			/* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010237 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010238 event.element.port_num = ppd->port;
10239 event.event = IB_EVENT_PORT_ACTIVE;
10240 }
10241 break;
10242 case HLS_DN_POLL:
10243 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10244 ppd->host_link_state == HLS_DN_OFFLINE) &&
10245 dd->dc_shutdown)
10246 dc_start(dd);
10247 /* Hand LED control to the DC */
10248 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10249
10250 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10251 u8 tmp = ppd->link_enabled;
10252
10253 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10254 if (ret) {
10255 ppd->link_enabled = tmp;
10256 break;
10257 }
10258 ppd->remote_link_down_reason = 0;
10259
10260 if (ppd->driver_link_ready)
10261 ppd->link_enabled = 1;
10262 }
10263
Jim Snowfb9036d2016-01-11 18:32:21 -050010264 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010265 ret = set_local_link_attributes(ppd);
10266 if (ret)
10267 break;
10268
10269 ppd->port_error_action = 0;
10270 ppd->host_link_state = HLS_DN_POLL;
10271
10272 if (quick_linkup) {
10273 /* quick linkup does not go into polling */
10274 ret = do_quick_linkup(dd);
10275 } else {
10276 ret1 = set_physical_link_state(dd, PLS_POLLING);
10277 if (ret1 != HCMD_SUCCESS) {
10278 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010279 "Failed to transition to Polling link state, return 0x%x\n",
10280 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010281 ret = -EINVAL;
10282 }
10283 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010284 ppd->offline_disabled_reason =
10285 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010286 /*
10287 * If an error occurred above, go back to offline. The
10288 * caller may reschedule another attempt.
10289 */
10290 if (ret)
10291 goto_offline(ppd, 0);
10292 break;
10293 case HLS_DN_DISABLE:
10294 /* link is disabled */
10295 ppd->link_enabled = 0;
10296
10297 /* allow any state to transition to disabled */
10298
10299 /* must transition to offline first */
10300 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10301 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10302 if (ret)
10303 break;
10304 ppd->remote_link_down_reason = 0;
10305 }
10306
10307 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10308 if (ret1 != HCMD_SUCCESS) {
10309 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010310 "Failed to transition to Disabled link state, return 0x%x\n",
10311 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010312 ret = -EINVAL;
10313 break;
10314 }
10315 ppd->host_link_state = HLS_DN_DISABLE;
10316 dc_shutdown(dd);
10317 break;
10318 case HLS_DN_OFFLINE:
10319 if (ppd->host_link_state == HLS_DN_DISABLE)
10320 dc_start(dd);
10321
10322 /* allow any state to transition to offline */
10323 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10324 if (!ret)
10325 ppd->remote_link_down_reason = 0;
10326 break;
10327 case HLS_VERIFY_CAP:
10328 if (ppd->host_link_state != HLS_DN_POLL)
10329 goto unexpected;
10330 ppd->host_link_state = HLS_VERIFY_CAP;
10331 break;
10332 case HLS_GOING_UP:
10333 if (ppd->host_link_state != HLS_VERIFY_CAP)
10334 goto unexpected;
10335
10336 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10337 if (ret1 != HCMD_SUCCESS) {
10338 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010339 "Failed to transition to link up state, return 0x%x\n",
10340 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010341 ret = -EINVAL;
10342 break;
10343 }
10344 ppd->host_link_state = HLS_GOING_UP;
10345 break;
10346
10347 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10348 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10349 default:
10350 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010351 __func__, state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010352 ret = -EINVAL;
10353 break;
10354 }
10355
Mike Marciniszyn77241052015-07-30 15:17:43 -040010356 goto done;
10357
10358unexpected:
10359 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010360 __func__, link_state_name(ppd->host_link_state),
10361 link_state_name(state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010362 ret = -EINVAL;
10363
10364done:
10365 mutex_unlock(&ppd->hls_lock);
10366
10367 if (event.device)
10368 ib_dispatch_event(&event);
10369
10370 return ret;
10371}
10372
10373int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10374{
10375 u64 reg;
10376 int ret = 0;
10377
10378 switch (which) {
10379 case HFI1_IB_CFG_LIDLMC:
10380 set_lidlmc(ppd);
10381 break;
10382 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10383 /*
10384 * The VL Arbitrator high limit is sent in units of 4k
10385 * bytes, while HFI stores it in units of 64 bytes.
10386 */
Jubin John8638b772016-02-14 20:19:24 -080010387 val *= 4096 / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010388 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10389 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10390 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10391 break;
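		/*
		 * Unit conversion example for the case above (illustrative):
		 * a limit of 2 (units of 4 KB) becomes 2 * 64 = 128 units of
		 * 64 bytes before being written to SEND_HIGH_PRIORITY_LIMIT.
		 */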
10392 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10393 /* HFI only supports POLL as the default link down state */
10394 if (val != HLS_DN_POLL)
10395 ret = -EINVAL;
10396 break;
10397 case HFI1_IB_CFG_OP_VLS:
10398 if (ppd->vls_operational != val) {
10399 ppd->vls_operational = val;
10400 if (!ppd->port)
10401 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010402 }
10403 break;
10404 /*
10405 * For link width, link width downgrade, and speed enable, always AND
10406 * the setting with what is actually supported. This has two benefits.
10407 * First, enabled can't have unsupported values, no matter what the
10408 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10409 * "fill in with your supported value" have all the bits in the
10410 * field set, so simply ANDing with supported has the desired result.
10411 */
10412 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10413 ppd->link_width_enabled = val & ppd->link_width_supported;
10414 break;
10415 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10416 ppd->link_width_downgrade_enabled =
10417 val & ppd->link_width_downgrade_supported;
10418 break;
10419 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10420 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10421 break;
10422 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10423 /*
10424		 * HFI does not follow IB specs; save this value
10425		 * so we can report it if asked.
10426 */
10427 ppd->overrun_threshold = val;
10428 break;
10429 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10430 /*
10431		 * HFI does not follow IB specs; save this value
10432		 * so we can report it if asked.
10433 */
10434 ppd->phy_error_threshold = val;
10435 break;
10436
10437 case HFI1_IB_CFG_MTU:
10438 set_send_length(ppd);
10439 break;
10440
10441 case HFI1_IB_CFG_PKEYS:
10442 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10443 set_partition_keys(ppd);
10444 break;
10445
10446 default:
10447 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10448 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010449 "%s: which %s, val 0x%x: not implemented\n",
10450 __func__, ib_cfg_name(which), val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010451 break;
10452 }
10453 return ret;
10454}
10455
10456/* begin functions related to vl arbitration table caching */
10457static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10458{
10459 int i;
10460
10461 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10462 VL_ARB_LOW_PRIO_TABLE_SIZE);
10463 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10464 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10465
10466 /*
10467 * Note that we always return values directly from the
10468 * 'vl_arb_cache' (and do no CSR reads) in response to a
10469 * 'Get(VLArbTable)'. This is obviously correct after a
10470 * 'Set(VLArbTable)', since the cache will then be up to
10471 * date. But it's also correct prior to any 'Set(VLArbTable)'
10472	 * since then both the cache and the relevant h/w registers
10473 * will be zeroed.
10474 */
10475
10476 for (i = 0; i < MAX_PRIO_TABLE; i++)
10477 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10478}
10479
10480/*
10481 * vl_arb_lock_cache
10482 *
10483 * All other vl_arb_* functions should be called only after locking
10484 * the cache.
10485 */
10486static inline struct vl_arb_cache *
10487vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10488{
10489 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10490 return NULL;
10491 spin_lock(&ppd->vl_arb_cache[idx].lock);
10492 return &ppd->vl_arb_cache[idx];
10493}
10494
10495static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10496{
10497 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10498}
10499
10500static void vl_arb_get_cache(struct vl_arb_cache *cache,
10501 struct ib_vl_weight_elem *vl)
10502{
10503 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10504}
10505
10506static void vl_arb_set_cache(struct vl_arb_cache *cache,
10507 struct ib_vl_weight_elem *vl)
10508{
10509 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10510}
10511
10512static int vl_arb_match_cache(struct vl_arb_cache *cache,
10513 struct ib_vl_weight_elem *vl)
10514{
10515 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10516}
Jubin Johnf4d507c2016-02-14 20:20:25 -080010517
Mike Marciniszyn77241052015-07-30 15:17:43 -040010518/* end functions related to vl arbitration table caching */
10519
10520static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10521 u32 size, struct ib_vl_weight_elem *vl)
10522{
10523 struct hfi1_devdata *dd = ppd->dd;
10524 u64 reg;
10525 unsigned int i, is_up = 0;
10526 int drain, ret = 0;
10527
10528 mutex_lock(&ppd->hls_lock);
10529
10530 if (ppd->host_link_state & HLS_UP)
10531 is_up = 1;
10532
10533 drain = !is_ax(dd) && is_up;
10534
10535 if (drain)
10536 /*
10537 * Before adjusting VL arbitration weights, empty per-VL
10538 * FIFOs, otherwise a packet whose VL weight is being
10539 * set to 0 could get stuck in a FIFO with no chance to
10540 * egress.
10541 */
10542 ret = stop_drain_data_vls(dd);
10543
10544 if (ret) {
10545 dd_dev_err(
10546 dd,
10547 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10548 __func__);
10549 goto err;
10550 }
10551
10552 for (i = 0; i < size; i++, vl++) {
10553 /*
10554 * NOTE: The low priority shift and mask are used here, but
10555 * they are the same for both the low and high registers.
10556 */
10557 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10558 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10559 | (((u64)vl->weight
10560 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10561 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10562 write_csr(dd, target + (i * 8), reg);
10563 }
10564 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10565
10566 if (drain)
10567 open_fill_data_vls(dd); /* reopen all VLs */
10568
10569err:
10570 mutex_unlock(&ppd->hls_lock);
10571
10572 return ret;
10573}
10574
10575/*
10576 * Read one credit merge VL register.
10577 */
10578static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10579 struct vl_limit *vll)
10580{
10581 u64 reg = read_csr(dd, csr);
10582
10583 vll->dedicated = cpu_to_be16(
10584 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10585 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10586 vll->shared = cpu_to_be16(
10587 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10588 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10589}
10590
10591/*
10592 * Read the current credit merge limits.
10593 */
10594static int get_buffer_control(struct hfi1_devdata *dd,
10595 struct buffer_control *bc, u16 *overall_limit)
10596{
10597 u64 reg;
10598 int i;
10599
10600 /* not all entries are filled in */
10601 memset(bc, 0, sizeof(*bc));
10602
10603 /* OPA and HFI have a 1-1 mapping */
10604 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080010605 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010606
10607 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10608 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10609
10610 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10611 bc->overall_shared_limit = cpu_to_be16(
10612 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10613 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10614 if (overall_limit)
10615 *overall_limit = (reg
10616 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10617 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10618 return sizeof(struct buffer_control);
10619}
10620
10621static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10622{
10623 u64 reg;
10624 int i;
10625
10626 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10627 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10628 for (i = 0; i < sizeof(u64); i++) {
10629 u8 byte = *(((u8 *)&reg) + i);
10630
10631 dp->vlnt[2 * i] = byte & 0xf;
10632 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10633 }
10634
10635 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10636 for (i = 0; i < sizeof(u64); i++) {
10637 u8 byte = *(((u8 *)&reg) + i);
10638
10639 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10640 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10641 }
10642 return sizeof(struct sc2vlnt);
10643}
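/*
 * Unpacking example for the loops above (illustrative): a register byte of
 * 0x53 produces two table entries, VLnt 3 from the low nibble and VLnt 5
 * from the high nibble, so each u64 CSR yields 16 SC->VLnt mappings.
 */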
10644
10645static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10646 struct ib_vl_weight_elem *vl)
10647{
10648 unsigned int i;
10649
10650 for (i = 0; i < nelems; i++, vl++) {
10651 vl->vl = 0xf;
10652 vl->weight = 0;
10653 }
10654}
10655
10656static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10657{
10658 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
Jubin John17fb4f22016-02-14 20:21:52 -080010659 DC_SC_VL_VAL(15_0,
10660 0, dp->vlnt[0] & 0xf,
10661 1, dp->vlnt[1] & 0xf,
10662 2, dp->vlnt[2] & 0xf,
10663 3, dp->vlnt[3] & 0xf,
10664 4, dp->vlnt[4] & 0xf,
10665 5, dp->vlnt[5] & 0xf,
10666 6, dp->vlnt[6] & 0xf,
10667 7, dp->vlnt[7] & 0xf,
10668 8, dp->vlnt[8] & 0xf,
10669 9, dp->vlnt[9] & 0xf,
10670 10, dp->vlnt[10] & 0xf,
10671 11, dp->vlnt[11] & 0xf,
10672 12, dp->vlnt[12] & 0xf,
10673 13, dp->vlnt[13] & 0xf,
10674 14, dp->vlnt[14] & 0xf,
10675 15, dp->vlnt[15] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010676 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
Jubin John17fb4f22016-02-14 20:21:52 -080010677 DC_SC_VL_VAL(31_16,
10678 16, dp->vlnt[16] & 0xf,
10679 17, dp->vlnt[17] & 0xf,
10680 18, dp->vlnt[18] & 0xf,
10681 19, dp->vlnt[19] & 0xf,
10682 20, dp->vlnt[20] & 0xf,
10683 21, dp->vlnt[21] & 0xf,
10684 22, dp->vlnt[22] & 0xf,
10685 23, dp->vlnt[23] & 0xf,
10686 24, dp->vlnt[24] & 0xf,
10687 25, dp->vlnt[25] & 0xf,
10688 26, dp->vlnt[26] & 0xf,
10689 27, dp->vlnt[27] & 0xf,
10690 28, dp->vlnt[28] & 0xf,
10691 29, dp->vlnt[29] & 0xf,
10692 30, dp->vlnt[30] & 0xf,
10693 31, dp->vlnt[31] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010694}
10695
10696static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10697 u16 limit)
10698{
10699 if (limit != 0)
10700 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010701 what, (int)limit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010702}
10703
10704/* change only the shared limit portion of SendCmGlobalCredit */
10705static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10706{
10707 u64 reg;
10708
10709 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10710 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10711 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10712 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10713}
10714
10715/* change only the total credit limit portion of SendCmGlobalCredit */
10716static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10717{
10718 u64 reg;
10719
10720 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10721 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10722 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10723 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10724}
10725
10726/* set the given per-VL shared limit */
10727static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10728{
10729 u64 reg;
10730 u32 addr;
10731
10732 if (vl < TXE_NUM_DATA_VL)
10733 addr = SEND_CM_CREDIT_VL + (8 * vl);
10734 else
10735 addr = SEND_CM_CREDIT_VL15;
10736
10737 reg = read_csr(dd, addr);
10738 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10739 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10740 write_csr(dd, addr, reg);
10741}
10742
10743/* set the given per-VL dedicated limit */
10744static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10745{
10746 u64 reg;
10747 u32 addr;
10748
10749 if (vl < TXE_NUM_DATA_VL)
10750 addr = SEND_CM_CREDIT_VL + (8 * vl);
10751 else
10752 addr = SEND_CM_CREDIT_VL15;
10753
10754 reg = read_csr(dd, addr);
10755 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10756 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10757 write_csr(dd, addr, reg);
10758}
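/*
 * The four helpers above (set_global_shared(), set_global_limit(),
 * set_vl_shared() and set_vl_dedicated()) all use the same
 * read-modify-write pattern: clear one field of the CSR with its SMASK,
 * then OR in the new value at the field's SHIFT. A minimal generic
 * sketch, with hypothetical FIELD_SMASK/FIELD_SHIFT names:
 *
 *	u64 reg = read_csr(dd, addr);
 *	reg &= ~FIELD_SMASK;
 *	reg |= (u64)limit << FIELD_SHIFT;
 *	write_csr(dd, addr, reg);
 */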
10759
10760/* spin until the given per-VL status mask bits clear */
10761static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10762 const char *which)
10763{
10764 unsigned long timeout;
10765 u64 reg;
10766
10767 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10768 while (1) {
10769 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10770
10771 if (reg == 0)
10772 return; /* success */
10773 if (time_after(jiffies, timeout))
10774 break; /* timed out */
10775 udelay(1);
10776 }
10777
10778 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010779 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10780 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010781 /*
10782 * If this occurs, it is likely there was a credit loss on the link.
10783 * The only recovery from that is a link bounce.
10784 */
10785 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010786 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010787}
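/*
 * wait_for_vl_status_clear() is the usual bounded-poll idiom: compute a
 * jiffies deadline, poll the CSR, and stop once time_after() reports the
 * deadline has passed. A stripped-down sketch of the idiom (illustration
 * only; POLL_TIMEOUT_MS is a hypothetical name):
 *
 *	unsigned long timeout = jiffies + msecs_to_jiffies(POLL_TIMEOUT_MS);
 *	while (read_csr(dd, csr) & mask) {
 *		if (time_after(jiffies, timeout))
 *			return -ETIMEDOUT;
 *		udelay(1);
 *	}
 *	return 0;
 */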
10788
10789/*
10790 * The number of credits on the VLs may be changed while everything
10791 * is "live", but the following algorithm must be followed due to
10792 * how the hardware is actually implemented. In particular,
10793 * Return_Credit_Status[] is the only correct status check.
10794 *
10795 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10796 * set Global_Shared_Credit_Limit = 0
10797 * use_all_vl = 1
10798 * mask0 = all VLs that are changing either dedicated or shared limits
10799 * set Shared_Limit[mask0] = 0
10800 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10801 * if (changing any dedicated limit)
10802 * mask1 = all VLs that are lowering dedicated limits
10803 * lower Dedicated_Limit[mask1]
10804 * spin until Return_Credit_Status[mask1] == 0
10805 * raise Dedicated_Limits
10806 * raise Shared_Limits
10807 * raise Global_Shared_Credit_Limit
10808 *
10809 * lower = if the new limit is lower, set the limit to the new value
10810 * raise = if the new limit is higher than the current value (may be changed
10811 * earlier in the algorithm), set the new limit to the new value
10812 */
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010813int set_buffer_control(struct hfi1_pportdata *ppd,
10814 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040010815{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010816 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010817 u64 changing_mask, ld_mask, stat_mask;
10818 int change_count;
10819 int i, use_all_mask;
10820 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010821 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010822 /*
10823 * A0: add the variable any_shared_limit_changing below and in the
10824 * algorithm above. If removing A0 support, it can be removed.
10825 */
10826 int any_shared_limit_changing;
10827 struct buffer_control cur_bc;
10828 u8 changing[OPA_MAX_VLS];
10829 u8 lowering_dedicated[OPA_MAX_VLS];
10830 u16 cur_total;
10831 u32 new_total = 0;
10832 const u64 all_mask =
10833 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10834 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10835 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10836 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10837 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10838 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10839 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10840 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10841 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10842
10843#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10844#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10845
Mike Marciniszyn77241052015-07-30 15:17:43 -040010846 /* find the new total credits, do sanity check on unused VLs */
10847 for (i = 0; i < OPA_MAX_VLS; i++) {
10848 if (valid_vl(i)) {
10849 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10850 continue;
10851 }
10852 nonzero_msg(dd, i, "dedicated",
Jubin John17fb4f22016-02-14 20:21:52 -080010853 be16_to_cpu(new_bc->vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010854 nonzero_msg(dd, i, "shared",
Jubin John17fb4f22016-02-14 20:21:52 -080010855 be16_to_cpu(new_bc->vl[i].shared));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010856 new_bc->vl[i].dedicated = 0;
10857 new_bc->vl[i].shared = 0;
10858 }
10859 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050010860
Mike Marciniszyn77241052015-07-30 15:17:43 -040010861 /* fetch the current values */
10862 get_buffer_control(dd, &cur_bc, &cur_total);
10863
10864 /*
10865 * Create the masks we will use.
10866 */
10867 memset(changing, 0, sizeof(changing));
10868 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
Jubin John4d114fd2016-02-14 20:21:43 -080010869 /*
10870 * NOTE: Assumes that the individual VL bits are adjacent and in
10871 * increasing order
10872 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010873 stat_mask =
10874 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10875 changing_mask = 0;
10876 ld_mask = 0;
10877 change_count = 0;
10878 any_shared_limit_changing = 0;
10879 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10880 if (!valid_vl(i))
10881 continue;
10882 this_shared_changing = new_bc->vl[i].shared
10883 != cur_bc.vl[i].shared;
10884 if (this_shared_changing)
10885 any_shared_limit_changing = 1;
Jubin Johnd0d236e2016-02-14 20:20:15 -080010886 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10887 this_shared_changing) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010888 changing[i] = 1;
10889 changing_mask |= stat_mask;
10890 change_count++;
10891 }
10892 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10893 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10894 lowering_dedicated[i] = 1;
10895 ld_mask |= stat_mask;
10896 }
10897 }
10898
10899 /* bracket the credit change with a total adjustment */
10900 if (new_total > cur_total)
10901 set_global_limit(dd, new_total);
10902
10903 /*
10904 * Start the credit change algorithm.
10905 */
10906 use_all_mask = 0;
10907 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010908 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10909 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010910 set_global_shared(dd, 0);
10911 cur_bc.overall_shared_limit = 0;
10912 use_all_mask = 1;
10913 }
10914
10915 for (i = 0; i < NUM_USABLE_VLS; i++) {
10916 if (!valid_vl(i))
10917 continue;
10918
10919 if (changing[i]) {
10920 set_vl_shared(dd, i, 0);
10921 cur_bc.vl[i].shared = 0;
10922 }
10923 }
10924
10925 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
Jubin John17fb4f22016-02-14 20:21:52 -080010926 "shared");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010927
10928 if (change_count > 0) {
10929 for (i = 0; i < NUM_USABLE_VLS; i++) {
10930 if (!valid_vl(i))
10931 continue;
10932
10933 if (lowering_dedicated[i]) {
10934 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080010935 be16_to_cpu(new_bc->
10936 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010937 cur_bc.vl[i].dedicated =
10938 new_bc->vl[i].dedicated;
10939 }
10940 }
10941
10942 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10943
10944 /* now raise all dedicated that are going up */
10945 for (i = 0; i < NUM_USABLE_VLS; i++) {
10946 if (!valid_vl(i))
10947 continue;
10948
10949 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10950 be16_to_cpu(cur_bc.vl[i].dedicated))
10951 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080010952 be16_to_cpu(new_bc->
10953 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010954 }
10955 }
10956
10957 /* next raise all shared that are going up */
10958 for (i = 0; i < NUM_USABLE_VLS; i++) {
10959 if (!valid_vl(i))
10960 continue;
10961
10962 if (be16_to_cpu(new_bc->vl[i].shared) >
10963 be16_to_cpu(cur_bc.vl[i].shared))
10964 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10965 }
10966
10967 /* finally raise the global shared */
10968 if (be16_to_cpu(new_bc->overall_shared_limit) >
Jubin John17fb4f22016-02-14 20:21:52 -080010969 be16_to_cpu(cur_bc.overall_shared_limit))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010970 set_global_shared(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010971 be16_to_cpu(new_bc->overall_shared_limit));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010972
10973 /* bracket the credit change with a total adjustment */
10974 if (new_total < cur_total)
10975 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010976
10977 /*
10978 * Determine the actual number of operational VLs using the number of
10979 * dedicated and shared credits for each VL.
10980 */
10981 if (change_count > 0) {
10982 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10983 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
10984 be16_to_cpu(new_bc->vl[i].shared) > 0)
10985 vl_count++;
10986 ppd->actual_vls_operational = vl_count;
10987 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
10988 ppd->actual_vls_operational :
10989 ppd->vls_operational,
10990 NULL);
10991 if (ret == 0)
10992 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
10993 ppd->actual_vls_operational :
10994 ppd->vls_operational, NULL);
10995 if (ret)
10996 return ret;
10997 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040010998 return 0;
10999}
11000
11001/*
11002 * Read the given fabric manager table. Return the size of the
11003 * table (in bytes) on success, and a negative error code on
11004 * failure.
11005 */
11006int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11007
11008{
11009 int size;
11010 struct vl_arb_cache *vlc;
11011
11012 switch (which) {
11013 case FM_TBL_VL_HIGH_ARB:
11014 size = 256;
11015 /*
11016 * OPA specifies 128 elements (of 2 bytes each), though
11017 * HFI supports only 16 elements in h/w.
11018 */
11019 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11020 vl_arb_get_cache(vlc, t);
11021 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11022 break;
11023 case FM_TBL_VL_LOW_ARB:
11024 size = 256;
11025 /*
11026 * OPA specifies 128 elements (of 2 bytes each), though
11027 * HFI supports only 16 elements in h/w.
11028 */
11029 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11030 vl_arb_get_cache(vlc, t);
11031 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11032 break;
11033 case FM_TBL_BUFFER_CONTROL:
11034 size = get_buffer_control(ppd->dd, t, NULL);
11035 break;
11036 case FM_TBL_SC2VLNT:
11037 size = get_sc2vlnt(ppd->dd, t);
11038 break;
11039 case FM_TBL_VL_PREEMPT_ELEMS:
11040 size = 256;
11041 /* OPA specifies 128 elements, of 2 bytes each */
11042 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11043 break;
11044 case FM_TBL_VL_PREEMPT_MATRIX:
11045 size = 256;
11046 /*
11047 * OPA specifies that this is the same size as the VL
11048 * arbitration tables (i.e., 256 bytes).
11049 */
11050 break;
11051 default:
11052 return -EINVAL;
11053 }
11054 return size;
11055}
11056
11057/*
11058 * Write the given fabric manager table.
11059 */
11060int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11061{
11062 int ret = 0;
11063 struct vl_arb_cache *vlc;
11064
11065 switch (which) {
11066 case FM_TBL_VL_HIGH_ARB:
11067 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11068 if (vl_arb_match_cache(vlc, t)) {
11069 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11070 break;
11071 }
11072 vl_arb_set_cache(vlc, t);
11073 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11074 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11075 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11076 break;
11077 case FM_TBL_VL_LOW_ARB:
11078 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11079 if (vl_arb_match_cache(vlc, t)) {
11080 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11081 break;
11082 }
11083 vl_arb_set_cache(vlc, t);
11084 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11085 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11086 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11087 break;
11088 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011089 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011090 break;
11091 case FM_TBL_SC2VLNT:
11092 set_sc2vlnt(ppd->dd, t);
11093 break;
11094 default:
11095 ret = -EINVAL;
11096 }
11097 return ret;
11098}
11099
11100/*
11101 * Disable all data VLs.
11102 *
11103 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11104 */
11105static int disable_data_vls(struct hfi1_devdata *dd)
11106{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011107 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011108 return 1;
11109
11110 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11111
11112 return 0;
11113}
11114
11115/*
11116 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11117 * Just re-enables all data VLs (the "fill" part happens
11118 * automatically - the name was chosen for symmetry with
11119 * stop_drain_data_vls()).
11120 *
11121 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11122 */
11123int open_fill_data_vls(struct hfi1_devdata *dd)
11124{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011125 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011126 return 1;
11127
11128 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11129
11130 return 0;
11131}
11132
11133/*
11134 * drain_data_vls() - assumes that disable_data_vls() has been called,
11135 * waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
11136 * engines to drop to 0.
11137 */
11138static void drain_data_vls(struct hfi1_devdata *dd)
11139{
11140 sc_wait(dd);
11141 sdma_wait(dd);
11142 pause_for_credit_return(dd);
11143}
11144
11145/*
11146 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11147 *
11148 * Use open_fill_data_vls() to resume using data VLs. This pair is
11149 * meant to be used like this:
11150 *
11151 * stop_drain_data_vls(dd);
11152 * // do things with per-VL resources
11153 * open_fill_data_vls(dd);
11154 */
11155int stop_drain_data_vls(struct hfi1_devdata *dd)
11156{
11157 int ret;
11158
11159 ret = disable_data_vls(dd);
11160 if (ret == 0)
11161 drain_data_vls(dd);
11162
11163 return ret;
11164}
11165
11166/*
11167 * Convert a nanosecond time to a cclock count. No matter how slow
11168 * the cclock, a non-zero ns will always have a non-zero result.
11169 */
11170u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11171{
11172 u32 cclocks;
11173
11174 if (dd->icode == ICODE_FPGA_EMULATION)
11175 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11176 else /* simulation pretends to be ASIC */
11177 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11178 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11179 cclocks = 1;
11180 return cclocks;
11181}
11182
11183/*
11184 * Convert a cclock count to nanoseconds. No matter how slow
11185 * the cclock, a non-zero cclock count will always have a non-zero result.
11186 */
11187u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11188{
11189 u32 ns;
11190
11191 if (dd->icode == ICODE_FPGA_EMULATION)
11192 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11193 else /* simulation pretends to be ASIC */
11194 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11195 if (cclocks && !ns)
11196 ns = 1;
11197 return ns;
11198}
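/*
 * Worked example of the two conversions above (the cclock period is
 * illustrative, not the real FPGA/ASIC value): assuming a period of
 * 1250 ps, ns_to_cclock(dd, 5) computes (5 * 1000) / 1250 = 4 cclocks and
 * cclock_to_ns(dd, 4) computes (4 * 1250) / 1000 = 5 ns. A request of
 * 1 ns would compute to 0 and be rounded up to 1 cclock by the final
 * check, preserving the "non-zero in, non-zero out" guarantee.
 */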
11199
11200/*
11201 * Dynamically adjust the receive interrupt timeout for a context based on
11202 * incoming packet rate.
11203 *
11204 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11205 */
11206static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11207{
11208 struct hfi1_devdata *dd = rcd->dd;
11209 u32 timeout = rcd->rcvavail_timeout;
11210
11211 /*
11212 * This algorithm doubles or halves the timeout depending on whether
11213 * the number of packets received in this interrupt was less than or
11214 * greater than or equal to the interrupt count.
11215 *
11216 * The calculations below do not allow a steady state to be achieved.
11217 * Only at the endpoints it is possible to have an unchanging
11218 * timeout.
11219 */
11220 if (npkts < rcv_intr_count) {
11221 /*
11222 * Not enough packets arrived before the timeout, adjust
11223 * timeout downward.
11224 */
11225 if (timeout < 2) /* already at minimum? */
11226 return;
11227 timeout >>= 1;
11228 } else {
11229 /*
11230 * More than enough packets arrived before the timeout, adjust
11231 * timeout upward.
11232 */
11233 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11234 return;
11235 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11236 }
11237
11238 rcd->rcvavail_timeout = timeout;
Jubin John4d114fd2016-02-14 20:21:43 -080011239 /*
11240 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11241 * been verified to be in range
11242 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011243 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011244 (u64)timeout <<
11245 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011246}
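/*
 * Example of the adjustment above (illustrative numbers): with
 * rcv_intr_count = 16 and a current timeout of 8, an interrupt that saw
 * 10 packets halves the timeout to 4, while one that saw 20 packets
 * doubles it to 16 (clamped to dd->rcv_intr_timeout_csr). Sustained light
 * traffic therefore walks the timeout down toward 1 and sustained heavy
 * traffic walks it up toward the CSR maximum.
 */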
11247
11248void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11249 u32 intr_adjust, u32 npkts)
11250{
11251 struct hfi1_devdata *dd = rcd->dd;
11252 u64 reg;
11253 u32 ctxt = rcd->ctxt;
11254
11255 /*
11256 * Need to write timeout register before updating RcvHdrHead to ensure
11257 * that a new value is used when the HW decides to restart counting.
11258 */
11259 if (intr_adjust)
11260 adjust_rcv_timeout(rcd, npkts);
11261 if (updegr) {
11262 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11263 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11264 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11265 }
11266 mmiowb();
11267 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11268 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11269 << RCV_HDR_HEAD_HEAD_SHIFT);
11270 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11271 mmiowb();
11272}
11273
11274u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11275{
11276 u32 head, tail;
11277
11278 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11279 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11280
11281 if (rcd->rcvhdrtail_kvaddr)
11282 tail = get_rcvhdrtail(rcd);
11283 else
11284 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11285
11286 return head == tail;
11287}
11288
11289/*
11290 * Context Control and Receive Array encoding for buffer size:
11291 * 0x0 invalid
11292 * 0x1 4 KB
11293 * 0x2 8 KB
11294 * 0x3 16 KB
11295 * 0x4 32 KB
11296 * 0x5 64 KB
11297 * 0x6 128 KB
11298 * 0x7 256 KB
11299 * 0x8 512 KB (Receive Array only)
11300 * 0x9 1 MB (Receive Array only)
11301 * 0xa 2 MB (Receive Array only)
11302 *
11303 * 0xB-0xF - reserved (Receive Array only)
11304 *
11305 *
11306 * This routine assumes that the value has already been sanity checked.
11307 */
11308static u32 encoded_size(u32 size)
11309{
11310 switch (size) {
Jubin John8638b772016-02-14 20:19:24 -080011311 case 4 * 1024: return 0x1;
11312 case 8 * 1024: return 0x2;
11313 case 16 * 1024: return 0x3;
11314 case 32 * 1024: return 0x4;
11315 case 64 * 1024: return 0x5;
11316 case 128 * 1024: return 0x6;
11317 case 256 * 1024: return 0x7;
11318 case 512 * 1024: return 0x8;
11319 case 1 * 1024 * 1024: return 0x9;
11320 case 2 * 1024 * 1024: return 0xa;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011321 }
11322 return 0x1; /* if invalid, go with the minimum size */
11323}
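/*
 * The encoding above is log2(size) - 11 for the supported sizes
 * (4 KB = 2^12 -> 0x1 ... 2 MB = 2^21 -> 0xa). An equivalent sketch,
 * shown only as an illustration of that relationship (the explicit
 * switch is kept for clarity and its fallback):
 *
 *	if (is_power_of_2(size) && size >= 4 * 1024 && size <= 2 * 1024 * 1024)
 *		return ilog2(size) - 11;
 *	return 0x1;
 */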
11324
11325void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11326{
11327 struct hfi1_ctxtdata *rcd;
11328 u64 rcvctrl, reg;
11329 int did_enable = 0;
11330
11331 rcd = dd->rcd[ctxt];
11332 if (!rcd)
11333 return;
11334
11335 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11336
11337 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11338 /* if the context already enabled, don't do the extra steps */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011339 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11340 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011341 /* reset the tail and hdr addresses, and sequence count */
11342 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11343 rcd->rcvhdrq_phys);
11344 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11345 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11346 rcd->rcvhdrqtailaddr_phys);
11347 rcd->seq_cnt = 1;
11348
11349 /* reset the cached receive header queue head value */
11350 rcd->head = 0;
11351
11352 /*
11353 * Zero the receive header queue so we don't get false
11354 * positives when checking the sequence number. The
11355 * sequence numbers could land exactly on the same spot.
11356 * E.g. a rcd restart before the receive header wrapped.
11357 */
11358 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11359
11360 /* starting timeout */
11361 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11362
11363 /* enable the context */
11364 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11365
11366 /* clean the egr buffer size first */
11367 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11368 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11369 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11370 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11371
11372 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11373 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11374 did_enable = 1;
11375
11376 /* zero RcvEgrIndexHead */
11377 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11378
11379 /* set eager count and base index */
11380 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11381 & RCV_EGR_CTRL_EGR_CNT_MASK)
11382 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11383 (((rcd->eager_base >> RCV_SHIFT)
11384 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11385 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11386 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11387
11388 /*
11389 * Set TID (expected) count and base index.
11390 * rcd->expected_count is set to individual RcvArray entries,
11391 * not pairs, and the CSR takes a pair-count in groups of
11392 * four, so divide by 8.
11393 */
11394 reg = (((rcd->expected_count >> RCV_SHIFT)
11395 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11396 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11397 (((rcd->expected_base >> RCV_SHIFT)
11398 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11399 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11400 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011401 if (ctxt == HFI1_CTRL_CTXT)
11402 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011403 }
11404 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11405 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011406 /*
11407 * When receive context is being disabled turn on tail
11408 * update with a dummy tail address and then disable
11409 * receive context.
11410 */
11411 if (dd->rcvhdrtail_dummy_physaddr) {
11412 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11413 dd->rcvhdrtail_dummy_physaddr);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011414 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011415 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11416 }
11417
Mike Marciniszyn77241052015-07-30 15:17:43 -040011418 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11419 }
11420 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11421 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11422 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11423 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11424 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11425 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011426 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11427 /* See comment on RcvCtxtCtrl.TailUpd above */
11428 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11429 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11430 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011431 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11432 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11433 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11434 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11435 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011436 /*
11437 * In one-packet-per-eager mode, the size comes from
11438 * the RcvArray entry.
11439 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011440 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11441 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11442 }
11443 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11444 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11445 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11446 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11447 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11448 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11449 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11450 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11451 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11452 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11453 rcd->rcvctrl = rcvctrl;
11454 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11455 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11456
11457 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011458 if (did_enable &&
11459 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011460 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11461 if (reg != 0) {
11462 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011463 ctxt, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011464 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11465 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11466 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11467 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11468 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11469 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011470 ctxt, reg, reg == 0 ? "not" : "still");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011471 }
11472 }
11473
11474 if (did_enable) {
11475 /*
11476 * The interrupt timeout and count must be set after
11477 * the context is enabled to take effect.
11478 */
11479 /* set interrupt timeout */
11480 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011481 (u64)rcd->rcvavail_timeout <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040011482 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11483
11484 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11485 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11486 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11487 }
11488
11489 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11490 /*
11491 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011492 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11493 * so it doesn't contain an address that is invalid.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011494 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011495 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11496 dd->rcvhdrtail_dummy_physaddr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011497}
11498
Dean Luick582e05c2016-02-18 11:13:01 -080011499u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011500{
11501 int ret;
11502 u64 val = 0;
11503
11504 if (namep) {
11505 ret = dd->cntrnameslen;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011506 *namep = dd->cntrnames;
11507 } else {
11508 const struct cntr_entry *entry;
11509 int i, j;
11510
11511 ret = (dd->ndevcntrs) * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011512
11513 /* Get the start of the block of counters */
11514 *cntrp = dd->cntrs;
11515
11516 /*
11517 * Now go and fill in each counter in the block.
11518 */
11519 for (i = 0; i < DEV_CNTR_LAST; i++) {
11520 entry = &dev_cntrs[i];
11521 hfi1_cdbg(CNTR, "reading %s", entry->name);
11522 if (entry->flags & CNTR_DISABLED) {
11523 /* Nothing */
11524 hfi1_cdbg(CNTR, "\tDisabled\n");
11525 } else {
11526 if (entry->flags & CNTR_VL) {
11527 hfi1_cdbg(CNTR, "\tPer VL\n");
11528 for (j = 0; j < C_VL_COUNT; j++) {
11529 val = entry->rw_cntr(entry,
11530 dd, j,
11531 CNTR_MODE_R,
11532 0);
11533 hfi1_cdbg(
11534 CNTR,
11535 "\t\tRead 0x%llx for %d\n",
11536 val, j);
11537 dd->cntrs[entry->offset + j] =
11538 val;
11539 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011540 } else if (entry->flags & CNTR_SDMA) {
11541 hfi1_cdbg(CNTR,
11542 "\t Per SDMA Engine\n");
11543 for (j = 0; j < dd->chip_sdma_engines;
11544 j++) {
11545 val =
11546 entry->rw_cntr(entry, dd, j,
11547 CNTR_MODE_R, 0);
11548 hfi1_cdbg(CNTR,
11549 "\t\tRead 0x%llx for %d\n",
11550 val, j);
11551 dd->cntrs[entry->offset + j] =
11552 val;
11553 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011554 } else {
11555 val = entry->rw_cntr(entry, dd,
11556 CNTR_INVALID_VL,
11557 CNTR_MODE_R, 0);
11558 dd->cntrs[entry->offset] = val;
11559 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11560 }
11561 }
11562 }
11563 }
11564 return ret;
11565}
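/*
 * The two call modes above return parallel data: with namep set, the
 * caller gets one newline-terminated name per counter (32-bit counters
 * carry a ",32" suffix, appended in init_cntrs()); with cntrp set, it
 * gets dd->ndevcntrs u64 values in the same order. A hedged userspace
 * sketch of pairing the two, assuming names/vals/n were obtained from
 * whatever file exposes these buffers:
 *
 *	char *name = strtok(names, "\n");
 *	for (size_t i = 0; name && i < n; i++, name = strtok(NULL, "\n"))
 *		printf("%s = %llu\n", name, (unsigned long long)vals[i]);
 */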
11566
11567/*
11568 * Used by sysfs to create files for hfi stats to read
11569 */
Dean Luick582e05c2016-02-18 11:13:01 -080011570u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011571{
11572 int ret;
11573 u64 val = 0;
11574
11575 if (namep) {
Dean Luick582e05c2016-02-18 11:13:01 -080011576 ret = ppd->dd->portcntrnameslen;
11577 *namep = ppd->dd->portcntrnames;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011578 } else {
11579 const struct cntr_entry *entry;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011580 int i, j;
11581
Dean Luick582e05c2016-02-18 11:13:01 -080011582 ret = ppd->dd->nportcntrs * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011583 *cntrp = ppd->cntrs;
11584
11585 for (i = 0; i < PORT_CNTR_LAST; i++) {
11586 entry = &port_cntrs[i];
11587 hfi1_cdbg(CNTR, "reading %s", entry->name);
11588 if (entry->flags & CNTR_DISABLED) {
11589 /* Nothing */
11590 hfi1_cdbg(CNTR, "\tDisabled\n");
11591 continue;
11592 }
11593
11594 if (entry->flags & CNTR_VL) {
11595 hfi1_cdbg(CNTR, "\tPer VL");
11596 for (j = 0; j < C_VL_COUNT; j++) {
11597 val = entry->rw_cntr(entry, ppd, j,
11598 CNTR_MODE_R,
11599 0);
11600 hfi1_cdbg(
11601 CNTR,
11602 "\t\tRead 0x%llx for %d",
11603 val, j);
11604 ppd->cntrs[entry->offset + j] = val;
11605 }
11606 } else {
11607 val = entry->rw_cntr(entry, ppd,
11608 CNTR_INVALID_VL,
11609 CNTR_MODE_R,
11610 0);
11611 ppd->cntrs[entry->offset] = val;
11612 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11613 }
11614 }
11615 }
11616 return ret;
11617}
11618
11619static void free_cntrs(struct hfi1_devdata *dd)
11620{
11621 struct hfi1_pportdata *ppd;
11622 int i;
11623
11624 if (dd->synth_stats_timer.data)
11625 del_timer_sync(&dd->synth_stats_timer);
11626 dd->synth_stats_timer.data = 0;
11627 ppd = (struct hfi1_pportdata *)(dd + 1);
11628 for (i = 0; i < dd->num_pports; i++, ppd++) {
11629 kfree(ppd->cntrs);
11630 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011631 free_percpu(ppd->ibport_data.rvp.rc_acks);
11632 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11633 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011634 ppd->cntrs = NULL;
11635 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011636 ppd->ibport_data.rvp.rc_acks = NULL;
11637 ppd->ibport_data.rvp.rc_qacks = NULL;
11638 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011639 }
11640 kfree(dd->portcntrnames);
11641 dd->portcntrnames = NULL;
11642 kfree(dd->cntrs);
11643 dd->cntrs = NULL;
11644 kfree(dd->scntrs);
11645 dd->scntrs = NULL;
11646 kfree(dd->cntrnames);
11647 dd->cntrnames = NULL;
11648}
11649
11650#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11651#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11652
11653static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11654 u64 *psval, void *context, int vl)
11655{
11656 u64 val;
11657 u64 sval = *psval;
11658
11659 if (entry->flags & CNTR_DISABLED) {
11660 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11661 return 0;
11662 }
11663
11664 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11665
11666 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11667
11668 /* If its a synthetic counter there is more work we need to do */
11669 if (entry->flags & CNTR_SYNTH) {
11670 if (sval == CNTR_MAX) {
11671 /* No need to read already saturated */
11672 return CNTR_MAX;
11673 }
11674
11675 if (entry->flags & CNTR_32BIT) {
11676 /* 32bit counters can wrap multiple times */
11677 u64 upper = sval >> 32;
11678 u64 lower = (sval << 32) >> 32;
11679
11680 if (lower > val) { /* hw wrapped */
11681 if (upper == CNTR_32BIT_MAX)
11682 val = CNTR_MAX;
11683 else
11684 upper++;
11685 }
11686
11687 if (val != CNTR_MAX)
11688 val = (upper << 32) | val;
11689
11690 } else {
11691 /* If we rolled we are saturated */
11692 if ((val < sval) || (val > CNTR_MAX))
11693 val = CNTR_MAX;
11694 }
11695 }
11696
11697 *psval = val;
11698
11699 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11700
11701 return val;
11702}
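/*
 * Standalone sketch of the 32-bit wrap handling above (illustration
 * only, userspace stdint.h types): the saved software value carries the
 * accumulated upper 32 bits, and a hardware read that went "backwards"
 * means the 32-bit counter wrapped, so the upper half is bumped. (The
 * driver additionally saturates at CNTR_MAX; the sketch omits that.)
 *
 *	uint64_t extend32(uint64_t saved, uint32_t hw)
 *	{
 *		uint64_t upper = saved >> 32;
 *		uint32_t lower = (uint32_t)saved;
 *
 *		if (lower > hw)		// hardware counter wrapped
 *			upper++;
 *		return (upper << 32) | hw;
 *	}
 */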
11703
11704static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11705 struct cntr_entry *entry,
11706 u64 *psval, void *context, int vl, u64 data)
11707{
11708 u64 val;
11709
11710 if (entry->flags & CNTR_DISABLED) {
11711 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11712 return 0;
11713 }
11714
11715 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11716
11717 if (entry->flags & CNTR_SYNTH) {
11718 *psval = data;
11719 if (entry->flags & CNTR_32BIT) {
11720 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11721 (data << 32) >> 32);
11722 val = data; /* return the full 64bit value */
11723 } else {
11724 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11725 data);
11726 }
11727 } else {
11728 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11729 }
11730
11731 *psval = val;
11732
11733 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11734
11735 return val;
11736}
11737
11738u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11739{
11740 struct cntr_entry *entry;
11741 u64 *sval;
11742
11743 entry = &dev_cntrs[index];
11744 sval = dd->scntrs + entry->offset;
11745
11746 if (vl != CNTR_INVALID_VL)
11747 sval += vl;
11748
11749 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11750}
11751
11752u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11753{
11754 struct cntr_entry *entry;
11755 u64 *sval;
11756
11757 entry = &dev_cntrs[index];
11758 sval = dd->scntrs + entry->offset;
11759
11760 if (vl != CNTR_INVALID_VL)
11761 sval += vl;
11762
11763 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11764}
11765
11766u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11767{
11768 struct cntr_entry *entry;
11769 u64 *sval;
11770
11771 entry = &port_cntrs[index];
11772 sval = ppd->scntrs + entry->offset;
11773
11774 if (vl != CNTR_INVALID_VL)
11775 sval += vl;
11776
11777 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11778 (index <= C_RCV_HDR_OVF_LAST)) {
11779 /* We do not want to bother for disabled contexts */
11780 return 0;
11781 }
11782
11783 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11784}
11785
11786u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11787{
11788 struct cntr_entry *entry;
11789 u64 *sval;
11790
11791 entry = &port_cntrs[index];
11792 sval = ppd->scntrs + entry->offset;
11793
11794 if (vl != CNTR_INVALID_VL)
11795 sval += vl;
11796
11797 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11798 (index <= C_RCV_HDR_OVF_LAST)) {
11799 /* We do not want to bother for disabled contexts */
11800 return 0;
11801 }
11802
11803 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11804}
11805
11806static void update_synth_timer(unsigned long opaque)
11807{
11808 u64 cur_tx;
11809 u64 cur_rx;
11810 u64 total_flits;
11811 u8 update = 0;
11812 int i, j, vl;
11813 struct hfi1_pportdata *ppd;
11814 struct cntr_entry *entry;
11815
11816 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11817
11818 /*
11819 * Rather than keep beating on the CSRs, pick a minimal set that we can
11820 * check to watch for potential rollover. We can do this by looking at
11821 * the number of flits sent/received. If the total flits exceeds 32 bits
11822 * then we have to iterate over all the counters and update them.
11823 */
11824 entry = &dev_cntrs[C_DC_RCV_FLITS];
11825 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11826
11827 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11828 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11829
11830 hfi1_cdbg(
11831 CNTR,
11832 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11833 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11834
11835 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11836 /*
11837 * May not be strictly necessary to update but it won't hurt and
11838 * simplifies the logic here.
11839 */
11840 update = 1;
11841 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11842 dd->unit);
11843 } else {
11844 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11845 hfi1_cdbg(CNTR,
11846 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11847 total_flits, (u64)CNTR_32BIT_MAX);
11848 if (total_flits >= CNTR_32BIT_MAX) {
11849 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11850 dd->unit);
11851 update = 1;
11852 }
11853 }
11854
11855 if (update) {
11856 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11857 for (i = 0; i < DEV_CNTR_LAST; i++) {
11858 entry = &dev_cntrs[i];
11859 if (entry->flags & CNTR_VL) {
11860 for (vl = 0; vl < C_VL_COUNT; vl++)
11861 read_dev_cntr(dd, i, vl);
11862 } else {
11863 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11864 }
11865 }
11866 ppd = (struct hfi1_pportdata *)(dd + 1);
11867 for (i = 0; i < dd->num_pports; i++, ppd++) {
11868 for (j = 0; j < PORT_CNTR_LAST; j++) {
11869 entry = &port_cntrs[j];
11870 if (entry->flags & CNTR_VL) {
11871 for (vl = 0; vl < C_VL_COUNT; vl++)
11872 read_port_cntr(ppd, j, vl);
11873 } else {
11874 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11875 }
11876 }
11877 }
11878
11879 /*
11880 * We want the value in the register. The goal is to keep track
11881 * of the number of "ticks" not the counter value. In other
11882 * words if the register rolls we want to notice it and go ahead
11883 * and force an update.
11884 */
11885 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11886 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11887 CNTR_MODE_R, 0);
11888
11889 entry = &dev_cntrs[C_DC_RCV_FLITS];
11890 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11891 CNTR_MODE_R, 0);
11892
11893 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11894 dd->unit, dd->last_tx, dd->last_rx);
11895
11896 } else {
11897 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11898 }
11899
11900 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11901}
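/*
 * Worked example of the "tripwire" above (illustrative numbers): if the
 * last snapshot saw tx = 0x1000 and rx = 0x2000 flits and the current
 * reads return tx = 0x80000000 and rx = 0x90000000, then total_flits is
 * 0x10fffd000, which exceeds CNTR_32BIT_MAX, so every device and port
 * counter is refreshed before any 32-bit hardware counter can wrap a
 * second (unnoticed) time.
 */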
11902
11903#define C_MAX_NAME 13 /* 12 chars + one for \0 */
11904static int init_cntrs(struct hfi1_devdata *dd)
11905{
Dean Luickc024c552016-01-11 18:30:57 -050011906 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011907 size_t sz;
11908 char *p;
11909 char name[C_MAX_NAME];
11910 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011911 const char *bit_type_32 = ",32";
11912 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011913
11914 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011915 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11916 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011917
11918 /***********************/
11919 /* per device counters */
11920 /***********************/
11921
11922 /* size names and determine how many we have */
11923 dd->ndevcntrs = 0;
11924 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011925
11926 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011927 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11928 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11929 continue;
11930 }
11931
11932 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050011933 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011934 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011935 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080011936 dev_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011937 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011938 /* Add ",32" for 32-bit counters */
11939 if (dev_cntrs[i].flags & CNTR_32BIT)
11940 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011941 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011942 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011943 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011944 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050011945 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011946 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011947 snprintf(name, C_MAX_NAME, "%s%d",
11948 dev_cntrs[i].name, j);
11949 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011950 /* Add ",32" for 32-bit counters */
11951 if (dev_cntrs[i].flags & CNTR_32BIT)
11952 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011953 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011954 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011955 }
11956 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011957 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011958 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011959 /* Add ",32" for 32-bit counters */
11960 if (dev_cntrs[i].flags & CNTR_32BIT)
11961 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050011962 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011963 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011964 }
11965 }
11966
11967 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050011968 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011969 if (!dd->cntrs)
11970 goto bail;
11971
Dean Luickc024c552016-01-11 18:30:57 -050011972 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011973 if (!dd->scntrs)
11974 goto bail;
11975
Mike Marciniszyn77241052015-07-30 15:17:43 -040011976 /* allocate space for the counter names */
11977 dd->cntrnameslen = sz;
11978 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11979 if (!dd->cntrnames)
11980 goto bail;
11981
11982 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050011983 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011984 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11985 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011986 } else if (dev_cntrs[i].flags & CNTR_VL) {
11987 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011988 snprintf(name, C_MAX_NAME, "%s%d",
11989 dev_cntrs[i].name,
11990 vl_from_idx(j));
11991 memcpy(p, name, strlen(name));
11992 p += strlen(name);
11993
11994 /* Counter is 32 bits */
11995 if (dev_cntrs[i].flags & CNTR_32BIT) {
11996 memcpy(p, bit_type_32, bit_type_32_sz);
11997 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011998 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011999
Mike Marciniszyn77241052015-07-30 15:17:43 -040012000 *p++ = '\n';
12001 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012002 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12003 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012004 snprintf(name, C_MAX_NAME, "%s%d",
12005 dev_cntrs[i].name, j);
12006 memcpy(p, name, strlen(name));
12007 p += strlen(name);
12008
12009 /* Counter is 32 bits */
12010 if (dev_cntrs[i].flags & CNTR_32BIT) {
12011 memcpy(p, bit_type_32, bit_type_32_sz);
12012 p += bit_type_32_sz;
12013 }
12014
12015 *p++ = '\n';
12016 }
12017 } else {
12018 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12019 p += strlen(dev_cntrs[i].name);
12020
12021 /* Counter is 32 bits */
12022 if (dev_cntrs[i].flags & CNTR_32BIT) {
12023 memcpy(p, bit_type_32, bit_type_32_sz);
12024 p += bit_type_32_sz;
12025 }
12026
12027 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040012028 }
12029 }
12030
12031 /*********************/
12032 /* per port counters */
12033 /*********************/
12034
12035 /*
12036 * Go through the counters for the overflows and disable the ones we
12037 * don't need. This varies based on platform so we need to do it
12038 * dynamically here.
12039 */
12040 rcv_ctxts = dd->num_rcv_contexts;
12041 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12042 i <= C_RCV_HDR_OVF_LAST; i++) {
12043 port_cntrs[i].flags |= CNTR_DISABLED;
12044 }
12045
12046 /* size port counter names and determine how many we have */
12047 sz = 0;
12048 dd->nportcntrs = 0;
12049 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012050 if (port_cntrs[i].flags & CNTR_DISABLED) {
12051 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12052 continue;
12053 }
12054
12055 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012056 port_cntrs[i].offset = dd->nportcntrs;
12057 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012058 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012059 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012060 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012061 /* Add ",32" for 32-bit counters */
12062 if (port_cntrs[i].flags & CNTR_32BIT)
12063 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012064 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012065 dd->nportcntrs++;
12066 }
12067 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012068 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012069 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012070 /* Add ",32" for 32-bit counters */
12071 if (port_cntrs[i].flags & CNTR_32BIT)
12072 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012073 port_cntrs[i].offset = dd->nportcntrs;
12074 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012075 }
12076 }
12077
12078 /* allocate space for the counter names */
12079 dd->portcntrnameslen = sz;
12080 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12081 if (!dd->portcntrnames)
12082 goto bail;
12083
12084 /* fill in port cntr names */
12085 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12086 if (port_cntrs[i].flags & CNTR_DISABLED)
12087 continue;
12088
12089 if (port_cntrs[i].flags & CNTR_VL) {
12090 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012091 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012092 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012093 memcpy(p, name, strlen(name));
12094 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012095
12096 /* Counter is 32 bits */
12097 if (port_cntrs[i].flags & CNTR_32BIT) {
12098 memcpy(p, bit_type_32, bit_type_32_sz);
12099 p += bit_type_32_sz;
12100 }
12101
Mike Marciniszyn77241052015-07-30 15:17:43 -040012102 *p++ = '\n';
12103 }
12104 } else {
12105 memcpy(p, port_cntrs[i].name,
12106 strlen(port_cntrs[i].name));
12107 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012108
12109 /* Counter is 32 bits */
12110 if (port_cntrs[i].flags & CNTR_32BIT) {
12111 memcpy(p, bit_type_32, bit_type_32_sz);
12112 p += bit_type_32_sz;
12113 }
12114
Mike Marciniszyn77241052015-07-30 15:17:43 -040012115 *p++ = '\n';
12116 }
12117 }
12118
12119 /* allocate per port storage for counter values */
12120 ppd = (struct hfi1_pportdata *)(dd + 1);
12121 for (i = 0; i < dd->num_pports; i++, ppd++) {
12122 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12123 if (!ppd->cntrs)
12124 goto bail;
12125
12126 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12127 if (!ppd->scntrs)
12128 goto bail;
12129 }
12130
12131 /* CPU counters need to be allocated and zeroed */
12132 if (init_cpu_counters(dd))
12133 goto bail;
12134
12135 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12136 return 0;
12137bail:
12138 free_cntrs(dd);
12139 return -ENOMEM;
12140}
12141
Mike Marciniszyn77241052015-07-30 15:17:43 -040012142static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12143{
12144 switch (chip_lstate) {
12145 default:
12146 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012147 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12148 chip_lstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012149 /* fall through */
12150 case LSTATE_DOWN:
12151 return IB_PORT_DOWN;
12152 case LSTATE_INIT:
12153 return IB_PORT_INIT;
12154 case LSTATE_ARMED:
12155 return IB_PORT_ARMED;
12156 case LSTATE_ACTIVE:
12157 return IB_PORT_ACTIVE;
12158 }
12159}
12160
12161u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12162{
12163 /* look at the HFI meta-states only */
12164 switch (chip_pstate & 0xf0) {
12165 default:
12166 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012167 chip_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012168 /* fall through */
12169 case PLS_DISABLED:
12170 return IB_PORTPHYSSTATE_DISABLED;
12171 case PLS_OFFLINE:
12172 return OPA_PORTPHYSSTATE_OFFLINE;
12173 case PLS_POLLING:
12174 return IB_PORTPHYSSTATE_POLLING;
12175 case PLS_CONFIGPHY:
12176 return IB_PORTPHYSSTATE_TRAINING;
12177 case PLS_LINKUP:
12178 return IB_PORTPHYSSTATE_LINKUP;
12179 case PLS_PHYTEST:
12180 return IB_PORTPHYSSTATE_PHY_TEST;
12181 }
12182}
12183
12184/* return the OPA port logical state name */
12185const char *opa_lstate_name(u32 lstate)
12186{
12187 static const char * const port_logical_names[] = {
12188 "PORT_NOP",
12189 "PORT_DOWN",
12190 "PORT_INIT",
12191 "PORT_ARMED",
12192 "PORT_ACTIVE",
12193 "PORT_ACTIVE_DEFER",
12194 };
12195 if (lstate < ARRAY_SIZE(port_logical_names))
12196 return port_logical_names[lstate];
12197 return "unknown";
12198}
12199
12200/* return the OPA port physical state name */
12201const char *opa_pstate_name(u32 pstate)
12202{
12203 static const char * const port_physical_names[] = {
12204 "PHYS_NOP",
12205 "reserved1",
12206 "PHYS_POLL",
12207 "PHYS_DISABLED",
12208 "PHYS_TRAINING",
12209 "PHYS_LINKUP",
12210 "PHYS_LINK_ERR_RECOVER",
12211 "PHYS_PHY_TEST",
12212 "reserved8",
12213 "PHYS_OFFLINE",
12214 "PHYS_GANGED",
12215 "PHYS_TEST",
12216 };
12217 if (pstate < ARRAY_SIZE(port_physical_names))
12218 return port_physical_names[pstate];
12219 return "unknown";
12220}
12221
12222/*
12223 * Read the hardware link state and set the driver's cached value of it.
12224 * Return the (new) current value.
12225 */
12226u32 get_logical_state(struct hfi1_pportdata *ppd)
12227{
12228 u32 new_state;
12229
12230 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12231 if (new_state != ppd->lstate) {
12232 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012233 opa_lstate_name(new_state), new_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012234 ppd->lstate = new_state;
12235 }
12236 /*
12237 * Set port status flags in the page mapped into userspace
12238 * memory. Do it here to ensure a reliable state - this is
12239 * the only function called by all state handling code.
12240 * Always set the flags due to the fact that the cache value
12241 * might have been changed explicitly outside of this
12242 * function.
12243 */
12244 if (ppd->statusp) {
12245 switch (ppd->lstate) {
12246 case IB_PORT_DOWN:
12247 case IB_PORT_INIT:
12248 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12249 HFI1_STATUS_IB_READY);
12250 break;
12251 case IB_PORT_ARMED:
12252 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12253 break;
12254 case IB_PORT_ACTIVE:
12255 *ppd->statusp |= HFI1_STATUS_IB_READY;
12256 break;
12257 }
12258 }
12259 return ppd->lstate;
12260}
12261
12262/**
12263 * wait_logical_linkstate - wait for an IB link state change to occur
12264 * @ppd: port device
12265 * @state: the state to wait for
12266 * @msecs: the number of milliseconds to wait
12267 *
12268 * Wait up to msecs milliseconds for IB link state change to occur.
12269 * For now, take the easy polling route.
12270 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12271 */
12272static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12273 int msecs)
12274{
12275 unsigned long timeout;
12276
12277 timeout = jiffies + msecs_to_jiffies(msecs);
12278 while (1) {
12279 if (get_logical_state(ppd) == state)
12280 return 0;
12281 if (time_after(jiffies, timeout))
12282 break;
12283 msleep(20);
12284 }
12285 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12286
12287 return -ETIMEDOUT;
12288}
12289
12290u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12291{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012292 u32 pstate;
12293 u32 ib_pstate;
12294
12295 pstate = read_physical_state(ppd->dd);
12296 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012297 if (ppd->last_pstate != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012298 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012299 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12300 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12301 pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012302 ppd->last_pstate = ib_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012303 }
12304 return ib_pstate;
12305}
12306
12307/*
12308 * Read/modify/write ASIC_QSFP register bits as selected by mask
12309 * data: 0 or 1 in the positions depending on what needs to be written
12310 * dir: 0 for read, 1 for write
12311 * mask: select by setting
12312 * I2CCLK (bit 0)
12313 * I2CDATA (bit 1)
12314 */
12315u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12316 u32 mask)
12317{
12318 u64 qsfp_oe, target_oe;
12319
12320 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12321 if (mask) {
12322 /* We are writing register bits, so lock access */
12323 dir &= mask;
12324 data &= mask;
12325
12326 qsfp_oe = read_csr(dd, target_oe);
12327 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12328 write_csr(dd, target_oe, qsfp_oe);
12329 }
12330 /* We are exclusively reading bits here, but it is unlikely
12331 * we'll get valid data when we set the direction of the pin
 12332	 * in the same call, so a caller that reads should call this
 12333	 * function again to get valid data
12334 */
12335 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12336}
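
/*
 * Usage sketch for the mask/dir semantics described above (illustrative
 * only; the I2CCLK/I2CDATA values below follow the bit positions given in
 * the function header rather than the driver's real QSFP macros):
 *
 *	#define I2CCLK  0x1	// bit 0
 *	#define I2CDATA 0x2	// bit 1
 *
 *	// drive I2CCLK as an output (set its OE bit), leave I2CDATA alone
 *	hfi1_gpio_mod(dd, target, 0, I2CCLK, I2CCLK);
 *
 *	// release both pins (clear OE), then read them back separately,
 *	// since a read in the direction-changing call may not be valid
 *	hfi1_gpio_mod(dd, target, 0, 0, I2CCLK | I2CDATA);
 *	pins = hfi1_gpio_mod(dd, target, 0, 0, 0);	// mask == 0: pure read
 */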
12337
12338#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12339(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12340
12341#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12342(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12343
12344int hfi1_init_ctxt(struct send_context *sc)
12345{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012346 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012347 struct hfi1_devdata *dd = sc->dd;
12348 u64 reg;
12349 u8 set = (sc->type == SC_USER ?
12350 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12351 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12352 reg = read_kctxt_csr(dd, sc->hw_context,
12353 SEND_CTXT_CHECK_ENABLE);
12354 if (set)
12355 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12356 else
12357 SET_STATIC_RATE_CONTROL_SMASK(reg);
12358 write_kctxt_csr(dd, sc->hw_context,
12359 SEND_CTXT_CHECK_ENABLE, reg);
12360 }
12361 return 0;
12362}
12363
12364int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12365{
12366 int ret = 0;
12367 u64 reg;
12368
12369 if (dd->icode != ICODE_RTL_SILICON) {
12370 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12371 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12372 __func__);
12373 return -EINVAL;
12374 }
12375 reg = read_csr(dd, ASIC_STS_THERM);
12376 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12377 ASIC_STS_THERM_CURR_TEMP_MASK);
12378 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12379 ASIC_STS_THERM_LO_TEMP_MASK);
12380 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12381 ASIC_STS_THERM_HI_TEMP_MASK);
12382 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12383 ASIC_STS_THERM_CRIT_TEMP_MASK);
12384 /* triggers is a 3-bit value - 1 bit per trigger. */
12385 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12386
12387 return ret;
12388}
12389
12390/* ========================================================================= */
12391
12392/*
12393 * Enable/disable chip from delivering interrupts.
12394 */
12395void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12396{
12397 int i;
12398
12399 /*
12400 * In HFI, the mask needs to be 1 to allow interrupts.
12401 */
12402 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012403 /* enable all interrupts */
12404 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012405 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012406
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012407 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012408 } else {
12409 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012410 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012411 }
12412}
12413
12414/*
12415 * Clear all interrupt sources on the chip.
12416 */
12417static void clear_all_interrupts(struct hfi1_devdata *dd)
12418{
12419 int i;
12420
12421 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012422 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012423
12424 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12425 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12426 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12427 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12428 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12429 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12430 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12431 for (i = 0; i < dd->chip_send_contexts; i++)
12432 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12433 for (i = 0; i < dd->chip_sdma_engines; i++)
12434 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12435
12436 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12437 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12438 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12439}
12440
12441/* Move to pcie.c? */
12442static void disable_intx(struct pci_dev *pdev)
12443{
12444 pci_intx(pdev, 0);
12445}
12446
12447static void clean_up_interrupts(struct hfi1_devdata *dd)
12448{
12449 int i;
12450
12451 /* remove irqs - must happen before disabling/turning off */
12452 if (dd->num_msix_entries) {
12453 /* MSI-X */
12454 struct hfi1_msix_entry *me = dd->msix_entries;
12455
12456 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012457 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012458 continue;
12459 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012460 free_irq(me->msix.vector, me->arg);
12461 }
12462 } else {
12463 /* INTx */
12464 if (dd->requested_intx_irq) {
12465 free_irq(dd->pcidev->irq, dd);
12466 dd->requested_intx_irq = 0;
12467 }
12468 }
12469
12470 /* turn off interrupts */
12471 if (dd->num_msix_entries) {
12472 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012473 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012474 } else {
12475 /* INTx */
12476 disable_intx(dd->pcidev);
12477 }
12478
12479 /* clean structures */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012480 kfree(dd->msix_entries);
12481 dd->msix_entries = NULL;
12482 dd->num_msix_entries = 0;
12483}
12484
12485/*
12486 * Remap the interrupt source from the general handler to the given MSI-X
12487 * interrupt.
12488 */
12489static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12490{
12491 u64 reg;
12492 int m, n;
12493
12494 /* clear from the handled mask of the general interrupt */
12495 m = isrc / 64;
12496 n = isrc % 64;
12497 dd->gi_mask[m] &= ~((u64)1 << n);
12498
12499 /* direct the chip source to the given MSI-X interrupt */
12500 m = isrc / 8;
12501 n = isrc % 8;
Jubin John8638b772016-02-14 20:19:24 -080012502 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12503 reg &= ~((u64)0xff << (8 * n));
12504 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12505 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012506}
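
/*
 * Worked example of the index math above, for an arbitrary source number
 * isrc = 70:
 *
 *	general mask:  m = 70 / 64 = 1, n = 70 % 64 = 6
 *		-> clear bit 6 of dd->gi_mask[1]
 *	map CSR:       m = 70 / 8 = 8,  n = 70 % 8 = 6
 *		-> rewrite byte 6 of CCE_INT_MAP + (8 * 8) with msix_intr
 *
 * Each 64-bit CCE_INT_MAP register therefore holds the MSI-X targets for
 * eight consecutive interrupt sources, one byte per source.
 */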
12507
12508static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12509 int engine, int msix_intr)
12510{
12511 /*
 12512	 * SDMA engine interrupt sources are grouped by type rather
 12513	 * than by engine. Per-engine interrupts are as follows:
12514 * SDMA
12515 * SDMAProgress
12516 * SDMAIdle
12517 */
Jubin John8638b772016-02-14 20:19:24 -080012518 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012519 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012520 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012521 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012522 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012523 msix_intr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012524}
12525
Mike Marciniszyn77241052015-07-30 15:17:43 -040012526static int request_intx_irq(struct hfi1_devdata *dd)
12527{
12528 int ret;
12529
Jubin John98050712015-11-16 21:59:27 -050012530 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12531 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012532 ret = request_irq(dd->pcidev->irq, general_interrupt,
Jubin John17fb4f22016-02-14 20:21:52 -080012533 IRQF_SHARED, dd->intx_name, dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012534 if (ret)
12535 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012536 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012537 else
12538 dd->requested_intx_irq = 1;
12539 return ret;
12540}
12541
12542static int request_msix_irqs(struct hfi1_devdata *dd)
12543{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012544 int first_general, last_general;
12545 int first_sdma, last_sdma;
12546 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012547 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012548
12549 /* calculate the ranges we are going to use */
12550 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080012551 last_general = first_general + 1;
12552 first_sdma = last_general;
12553 last_sdma = first_sdma + dd->num_sdma;
12554 first_rx = last_sdma;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012555 last_rx = first_rx + dd->n_krcv_queues;
12556
12557 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012558 * Sanity check - the code expects all SDMA chip source
12559 * interrupts to be in the same CSR, starting at bit 0. Verify
12560 * that this is true by checking the bit location of the start.
12561 */
12562 BUILD_BUG_ON(IS_SDMA_START % 64);
12563
12564 for (i = 0; i < dd->num_msix_entries; i++) {
12565 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12566 const char *err_info;
12567 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012568 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012569 void *arg;
12570 int idx;
12571 struct hfi1_ctxtdata *rcd = NULL;
12572 struct sdma_engine *sde = NULL;
12573
12574 /* obtain the arguments to request_irq */
12575 if (first_general <= i && i < last_general) {
12576 idx = i - first_general;
12577 handler = general_interrupt;
12578 arg = dd;
12579 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012580 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012581 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012582 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012583 } else if (first_sdma <= i && i < last_sdma) {
12584 idx = i - first_sdma;
12585 sde = &dd->per_sdma[idx];
12586 handler = sdma_interrupt;
12587 arg = sde;
12588 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012589 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012590 err_info = "sdma";
12591 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012592 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012593 } else if (first_rx <= i && i < last_rx) {
12594 idx = i - first_rx;
12595 rcd = dd->rcd[idx];
12596 /* no interrupt if no rcd */
12597 if (!rcd)
12598 continue;
12599 /*
12600 * Set the interrupt register and mask for this
12601 * context's interrupt.
12602 */
Jubin John8638b772016-02-14 20:19:24 -080012603 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012604 rcd->imask = ((u64)1) <<
Jubin John8638b772016-02-14 20:19:24 -080012605 ((IS_RCVAVAIL_START + idx) % 64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012606 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012607 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012608 arg = rcd;
12609 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012610 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012611 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012612 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012613 me->type = IRQ_RCVCTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012614 } else {
12615 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080012616 * ignore it
12617 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012618 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012619 "Unexpected extra MSI-X interrupt %d\n", i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012620 continue;
12621 }
12622 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080012623 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012624 continue;
12625 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080012626 me->name[sizeof(me->name) - 1] = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012627
Dean Luickf4f30031c2015-10-26 10:28:44 -040012628 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
Jubin John17fb4f22016-02-14 20:21:52 -080012629 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012630 if (ret) {
12631 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012632 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12633 err_info, me->msix.vector, idx, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012634 return ret;
12635 }
12636 /*
12637 * assign arg after request_irq call, so it will be
12638 * cleaned up
12639 */
12640 me->arg = arg;
12641
Mitko Haralanov957558c2016-02-03 14:33:40 -080012642 ret = hfi1_get_irq_affinity(dd, me);
12643 if (ret)
12644 dd_dev_err(dd,
12645 "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012646 }
12647
Mike Marciniszyn77241052015-07-30 15:17:43 -040012648 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012649}
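
/*
 * Resulting MSI-X vector layout from the ranges computed at the top of
 * this function (numbers are hypothetical, for illustration only):
 * with dd->num_sdma = 16 and dd->n_krcv_queues = 9,
 *
 *	vector  0        general "slow path" interrupt
 *	vectors 1 - 16   one per SDMA engine
 *	vectors 17 - 25  one per kernel receive context
 */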
12650
12651/*
12652 * Set the general handler to accept all interrupts, remap all
12653 * chip interrupts back to MSI-X 0.
12654 */
12655static void reset_interrupts(struct hfi1_devdata *dd)
12656{
12657 int i;
12658
12659 /* all interrupts handled by the general handler */
12660 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12661 dd->gi_mask[i] = ~(u64)0;
12662
12663 /* all chip interrupts map to MSI-X 0 */
12664 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012665 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012666}
12667
12668static int set_up_interrupts(struct hfi1_devdata *dd)
12669{
12670 struct hfi1_msix_entry *entries;
12671 u32 total, request;
12672 int i, ret;
12673 int single_interrupt = 0; /* we expect to have all the interrupts */
12674
12675 /*
12676 * Interrupt count:
12677 * 1 general, "slow path" interrupt (includes the SDMA engines
12678 * slow source, SDMACleanupDone)
12679 * N interrupts - one per used SDMA engine
 12680	 * M interrupts - one per kernel receive context
12681 */
12682 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12683
12684 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12685 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012686 ret = -ENOMEM;
12687 goto fail;
12688 }
12689 /* 1-1 MSI-X entry assignment */
12690 for (i = 0; i < total; i++)
12691 entries[i].msix.entry = i;
12692
12693 /* ask for MSI-X interrupts */
12694 request = total;
12695 request_msix(dd, &request, entries);
12696
12697 if (request == 0) {
12698 /* using INTx */
12699 /* dd->num_msix_entries already zero */
12700 kfree(entries);
12701 single_interrupt = 1;
12702 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12703 } else {
12704 /* using MSI-X */
12705 dd->num_msix_entries = request;
12706 dd->msix_entries = entries;
12707
12708 if (request != total) {
12709 /* using MSI-X, with reduced interrupts */
12710 dd_dev_err(
12711 dd,
12712 "cannot handle reduced interrupt case, want %u, got %u\n",
12713 total, request);
12714 ret = -EINVAL;
12715 goto fail;
12716 }
12717 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12718 }
12719
12720 /* mask all interrupts */
12721 set_intr_state(dd, 0);
12722 /* clear all pending interrupts */
12723 clear_all_interrupts(dd);
12724
12725 /* reset general handler mask, chip MSI-X mappings */
12726 reset_interrupts(dd);
12727
12728 if (single_interrupt)
12729 ret = request_intx_irq(dd);
12730 else
12731 ret = request_msix_irqs(dd);
12732 if (ret)
12733 goto fail;
12734
12735 return 0;
12736
12737fail:
12738 clean_up_interrupts(dd);
12739 return ret;
12740}
12741
12742/*
12743 * Set up context values in dd. Sets:
12744 *
12745 * num_rcv_contexts - number of contexts being used
12746 * n_krcv_queues - number of kernel contexts
12747 * first_user_ctxt - first non-kernel context in array of contexts
12748 * freectxts - number of free user contexts
12749 * num_send_contexts - number of PIO send contexts being used
12750 */
12751static int set_up_context_variables(struct hfi1_devdata *dd)
12752{
12753 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012754 int total_contexts;
12755 int ret;
12756 unsigned ngroups;
Dean Luick8f000f72016-04-12 11:32:06 -070012757 int qos_rmt_count;
12758 int user_rmt_reduced;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012759
12760 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070012761 * Kernel receive contexts:
12762 * - min of 2 or 1 context/numa (excluding control context)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012763 * - Context 0 - control context (VL15/multicast/error)
Dean Luick33a9eb52016-04-12 10:50:22 -070012764 * - Context 1 - first kernel context
12765 * - Context 2 - second kernel context
12766 * ...
Mike Marciniszyn77241052015-07-30 15:17:43 -040012767 */
12768 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012769 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070012770 * n_krcvqs is the sum of module parameter kernel receive
12771 * contexts, krcvqs[]. It does not include the control
12772 * context, so add that.
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012773 */
Dean Luick33a9eb52016-04-12 10:50:22 -070012774 num_kernel_contexts = n_krcvqs + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012775 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012776 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012777 num_kernel_contexts =
12778 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12779 /*
12780 * Every kernel receive context needs an ACK send context.
 12781	 * One send context is allocated for each VL{0-7} and VL15.
12782 */
12783 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12784 dd_dev_err(dd,
12785 "Reducing # kernel rcv contexts to: %d, from %d\n",
12786 (int)(dd->chip_send_contexts - num_vls - 1),
12787 (int)num_kernel_contexts);
12788 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12789 }
12790 /*
Jubin John0852d242016-04-12 11:30:08 -070012791 * User contexts:
12792 * - default to 1 user context per real (non-HT) CPU core if
12793 * num_user_contexts is negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012794 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012795 if (num_user_contexts < 0)
Jubin John0852d242016-04-12 11:30:08 -070012796 num_user_contexts =
12797 cpumask_weight(&dd->affinity->real_cpu_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012798
12799 total_contexts = num_kernel_contexts + num_user_contexts;
12800
12801 /*
12802 * Adjust the counts given a global max.
12803 */
12804 if (total_contexts > dd->chip_rcv_contexts) {
12805 dd_dev_err(dd,
12806 "Reducing # user receive contexts to: %d, from %d\n",
12807 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12808 (int)num_user_contexts);
12809 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12810 /* recalculate */
12811 total_contexts = num_kernel_contexts + num_user_contexts;
12812 }
12813
Dean Luick8f000f72016-04-12 11:32:06 -070012814 /* each user context requires an entry in the RMT */
12815 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12816 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12817 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12818 dd_dev_err(dd,
12819 "RMT size is reducing the number of user receive contexts from %d to %d\n",
12820 (int)num_user_contexts,
12821 user_rmt_reduced);
12822 /* recalculate */
12823 num_user_contexts = user_rmt_reduced;
12824 total_contexts = num_kernel_contexts + num_user_contexts;
12825 }
12826
Mike Marciniszyn77241052015-07-30 15:17:43 -040012827 /* the first N are kernel contexts, the rest are user contexts */
12828 dd->num_rcv_contexts = total_contexts;
12829 dd->n_krcv_queues = num_kernel_contexts;
12830 dd->first_user_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080012831 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012832 dd->freectxts = num_user_contexts;
12833 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012834 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12835 (int)dd->chip_rcv_contexts,
12836 (int)dd->num_rcv_contexts,
12837 (int)dd->n_krcv_queues,
12838 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012839
12840 /*
12841 * Receive array allocation:
12842 * All RcvArray entries are divided into groups of 8. This
12843 * is required by the hardware and will speed up writes to
12844 * consecutive entries by using write-combining of the entire
12845 * cacheline.
12846 *
 12847	 * The groups are evenly divided among all contexts; any
 12848	 * leftover groups are given to the first N user
 12849	 * contexts.
12850 */
12851 dd->rcv_entries.group_size = RCV_INCREMENT;
12852 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12853 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12854 dd->rcv_entries.nctxt_extra = ngroups -
12855 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12856 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12857 dd->rcv_entries.ngroups,
12858 dd->rcv_entries.nctxt_extra);
12859 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12860 MAX_EAGER_ENTRIES * 2) {
12861 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12862 dd->rcv_entries.group_size;
12863 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012864 "RcvArray group count too high, change to %u\n",
12865 dd->rcv_entries.ngroups);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012866 dd->rcv_entries.nctxt_extra = 0;
12867 }
12868 /*
12869 * PIO send contexts
12870 */
12871 ret = init_sc_pools_and_sizes(dd);
12872 if (ret >= 0) { /* success */
12873 dd->num_send_contexts = ret;
12874 dd_dev_info(
12875 dd,
Jianxin Xiong44306f12016-04-12 11:30:28 -070012876 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040012877 dd->chip_send_contexts,
12878 dd->num_send_contexts,
12879 dd->sc_sizes[SC_KERNEL].count,
12880 dd->sc_sizes[SC_ACK].count,
Jianxin Xiong44306f12016-04-12 11:30:28 -070012881 dd->sc_sizes[SC_USER].count,
12882 dd->sc_sizes[SC_VL15].count);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012883 ret = 0; /* success */
12884 }
12885
12886 return ret;
12887}
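
/*
 * Worked example of the sizing above (all numbers hypothetical, chosen
 * only to show the arithmetic; NUM_MAP_ENTRIES is assumed to be 256 and
 * RCV_INCREMENT to be 8, per the "groups of 8" comment):
 *
 *	n_krcvqs = 8                -> num_kernel_contexts = 8 + 1 = 9
 *	                               (assumed above MIN_KERNEL_KCTXTS)
 *	num_user_contexts = 24, qos_rmt_entries() = 32
 *	                            -> 32 + 24 < 256, no RMT reduction
 *	total_contexts = 9 + 24 = 33
 *
 *	chip_rcv_array_count = 8192 -> ngroups = 8192 / 8 = 1024
 *	ngroups per context = 1024 / 33 = 31
 *	nctxt_extra = 1024 - 33 * 31 = 1 (given to the first user context)
 */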
12888
12889/*
12890 * Set the device/port partition key table. The MAD code
12891 * will ensure that, at least, the partial management
12892 * partition key is present in the table.
12893 */
12894static void set_partition_keys(struct hfi1_pportdata *ppd)
12895{
12896 struct hfi1_devdata *dd = ppd->dd;
12897 u64 reg = 0;
12898 int i;
12899
12900 dd_dev_info(dd, "Setting partition keys\n");
12901 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12902 reg |= (ppd->pkeys[i] &
12903 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12904 ((i % 4) *
12905 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12906 /* Each register holds 4 PKey values. */
12907 if ((i % 4) == 3) {
12908 write_csr(dd, RCV_PARTITION_KEY +
12909 ((i - 3) * 2), reg);
12910 reg = 0;
12911 }
12912 }
12913
12914 /* Always enable HW pkeys check when pkeys table is set */
12915 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12916}
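
/*
 * Packing sketch for the loop above: each 64-bit RcvPartitionKey register
 * holds four 16-bit pkeys (the shift is taken from
 * RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT, assumed to be 16), so the
 * first register is built as
 *
 *	reg = pkeys[0] | pkeys[1] << 16 | pkeys[2] << 32 | pkeys[3] << 48
 *
 * and written at i == 3 to RCV_PARTITION_KEY + (3 - 3) * 2 = +0; the
 * next four pkeys are written at offset +8, and so on.
 */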
12917
12918/*
12919 * These CSRs and memories are uninitialized on reset and must be
12920 * written before reading to set the ECC/parity bits.
12921 *
12922 * NOTE: All user context CSRs that are not mmaped write-only
12923 * (e.g. the TID flows) must be initialized even if the driver never
12924 * reads them.
12925 */
12926static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12927{
12928 int i, j;
12929
12930 /* CceIntMap */
12931 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012932 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012933
12934 /* SendCtxtCreditReturnAddr */
12935 for (i = 0; i < dd->chip_send_contexts; i++)
12936 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12937
12938 /* PIO Send buffers */
12939 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080012940 /*
12941 * These are not normally read, and (presently) have no method
12942 * to be read, so are not pre-initialized
12943 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012944
12945 /* RcvHdrAddr */
12946 /* RcvHdrTailAddr */
12947 /* RcvTidFlowTable */
12948 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12949 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12950 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12951 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080012952 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012953 }
12954
12955 /* RcvArray */
12956 for (i = 0; i < dd->chip_rcv_array_count; i++)
Jubin John8638b772016-02-14 20:19:24 -080012957 write_csr(dd, RCV_ARRAY + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080012958 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012959
12960 /* RcvQPMapTable */
12961 for (i = 0; i < 32; i++)
12962 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12963}
12964
12965/*
12966 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12967 */
12968static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12969 u64 ctrl_bits)
12970{
12971 unsigned long timeout;
12972 u64 reg;
12973
12974 /* is the condition present? */
12975 reg = read_csr(dd, CCE_STATUS);
12976 if ((reg & status_bits) == 0)
12977 return;
12978
12979 /* clear the condition */
12980 write_csr(dd, CCE_CTRL, ctrl_bits);
12981
12982 /* wait for the condition to clear */
12983 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12984 while (1) {
12985 reg = read_csr(dd, CCE_STATUS);
12986 if ((reg & status_bits) == 0)
12987 return;
12988 if (time_after(jiffies, timeout)) {
12989 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012990 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12991 status_bits, reg & status_bits);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012992 return;
12993 }
12994 udelay(1);
12995 }
12996}
12997
12998/* set CCE CSRs to chip reset defaults */
12999static void reset_cce_csrs(struct hfi1_devdata *dd)
13000{
13001 int i;
13002
13003 /* CCE_REVISION read-only */
13004 /* CCE_REVISION2 read-only */
13005 /* CCE_CTRL - bits clear automatically */
13006 /* CCE_STATUS read-only, use CceCtrl to clear */
13007 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13008 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13009 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13010 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13011 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13012 /* CCE_ERR_STATUS read-only */
13013 write_csr(dd, CCE_ERR_MASK, 0);
13014 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13015 /* CCE_ERR_FORCE leave alone */
13016 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13017 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13018 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13019 /* CCE_PCIE_CTRL leave alone */
13020 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13021 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13022 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013023 CCE_MSIX_TABLE_UPPER_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013024 }
13025 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13026 /* CCE_MSIX_PBA read-only */
13027 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13028 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13029 }
13030 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13031 write_csr(dd, CCE_INT_MAP, 0);
13032 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13033 /* CCE_INT_STATUS read-only */
13034 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13035 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13036 /* CCE_INT_FORCE leave alone */
13037 /* CCE_INT_BLOCKED read-only */
13038 }
13039 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13040 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13041}
13042
Mike Marciniszyn77241052015-07-30 15:17:43 -040013043/* set MISC CSRs to chip reset defaults */
13044static void reset_misc_csrs(struct hfi1_devdata *dd)
13045{
13046 int i;
13047
13048 for (i = 0; i < 32; i++) {
13049 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13050 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13051 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13052 }
Jubin John4d114fd2016-02-14 20:21:43 -080013053 /*
13054 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
 13055	 * only be written in 128-byte chunks
13056 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013057 /* init RSA engine to clear lingering errors */
13058 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13059 write_csr(dd, MISC_CFG_RSA_MU, 0);
13060 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13061 /* MISC_STS_8051_DIGEST read-only */
13062 /* MISC_STS_SBM_DIGEST read-only */
13063 /* MISC_STS_PCIE_DIGEST read-only */
13064 /* MISC_STS_FAB_DIGEST read-only */
13065 /* MISC_ERR_STATUS read-only */
13066 write_csr(dd, MISC_ERR_MASK, 0);
13067 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13068 /* MISC_ERR_FORCE leave alone */
13069}
13070
13071/* set TXE CSRs to chip reset defaults */
13072static void reset_txe_csrs(struct hfi1_devdata *dd)
13073{
13074 int i;
13075
13076 /*
13077 * TXE Kernel CSRs
13078 */
13079 write_csr(dd, SEND_CTRL, 0);
13080 __cm_reset(dd, 0); /* reset CM internal state */
13081 /* SEND_CONTEXTS read-only */
13082 /* SEND_DMA_ENGINES read-only */
13083 /* SEND_PIO_MEM_SIZE read-only */
13084 /* SEND_DMA_MEM_SIZE read-only */
13085 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13086 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13087 /* SEND_PIO_ERR_STATUS read-only */
13088 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13089 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13090 /* SEND_PIO_ERR_FORCE leave alone */
13091 /* SEND_DMA_ERR_STATUS read-only */
13092 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13093 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13094 /* SEND_DMA_ERR_FORCE leave alone */
13095 /* SEND_EGRESS_ERR_STATUS read-only */
13096 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13097 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13098 /* SEND_EGRESS_ERR_FORCE leave alone */
13099 write_csr(dd, SEND_BTH_QP, 0);
13100 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13101 write_csr(dd, SEND_SC2VLT0, 0);
13102 write_csr(dd, SEND_SC2VLT1, 0);
13103 write_csr(dd, SEND_SC2VLT2, 0);
13104 write_csr(dd, SEND_SC2VLT3, 0);
13105 write_csr(dd, SEND_LEN_CHECK0, 0);
13106 write_csr(dd, SEND_LEN_CHECK1, 0);
13107 /* SEND_ERR_STATUS read-only */
13108 write_csr(dd, SEND_ERR_MASK, 0);
13109 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13110 /* SEND_ERR_FORCE read-only */
13111 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013112 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013113 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013114 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13115 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13116 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013117 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013118 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013119 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013120 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013121 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
Jubin John17fb4f22016-02-14 20:21:52 -080013122 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013123 /* SEND_CM_CREDIT_USED_STATUS read-only */
13124 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13125 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13126 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13127 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13128 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13129 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013130 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013131 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13132 /* SEND_CM_CREDIT_USED_VL read-only */
13133 /* SEND_CM_CREDIT_USED_VL15 read-only */
13134 /* SEND_EGRESS_CTXT_STATUS read-only */
13135 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13136 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13137 /* SEND_EGRESS_ERR_INFO read-only */
13138 /* SEND_EGRESS_ERR_SOURCE read-only */
13139
13140 /*
13141 * TXE Per-Context CSRs
13142 */
13143 for (i = 0; i < dd->chip_send_contexts; i++) {
13144 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13145 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13146 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13147 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13148 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13149 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13150 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13151 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13152 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13153 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13154 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13155 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13156 }
13157
13158 /*
13159 * TXE Per-SDMA CSRs
13160 */
13161 for (i = 0; i < dd->chip_sdma_engines; i++) {
13162 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13163 /* SEND_DMA_STATUS read-only */
13164 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13165 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13166 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13167 /* SEND_DMA_HEAD read-only */
13168 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13169 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13170 /* SEND_DMA_IDLE_CNT read-only */
13171 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13172 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13173 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13174 /* SEND_DMA_ENG_ERR_STATUS read-only */
13175 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13176 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13177 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13178 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13179 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13180 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13181 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13182 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13183 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13184 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13185 }
13186}
13187
13188/*
13189 * Expect on entry:
13190 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13191 */
13192static void init_rbufs(struct hfi1_devdata *dd)
13193{
13194 u64 reg;
13195 int count;
13196
13197 /*
13198 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13199 * clear.
13200 */
13201 count = 0;
13202 while (1) {
13203 reg = read_csr(dd, RCV_STATUS);
13204 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13205 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13206 break;
13207 /*
13208 * Give up after 1ms - maximum wait time.
13209 *
13210 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13211 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13212 * 148 KB / (66% * 250MB/s) = 920us
13213 */
13214 if (count++ > 500) {
13215 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013216 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13217 __func__, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013218 break;
13219 }
13220 udelay(2); /* do not busy-wait the CSR */
13221 }
13222
13223 /* start the init - expect RcvCtrl to be 0 */
13224 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13225
13226 /*
 13227	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13228 * period after the write before RcvStatus.RxRbufInitDone is valid.
13229 * The delay in the first run through the loop below is sufficient and
 13230	 * required before the first read of RcvStatus.RxRbufInitDone.
13231 */
13232 read_csr(dd, RCV_CTRL);
13233
13234 /* wait for the init to finish */
13235 count = 0;
13236 while (1) {
13237 /* delay is required first time through - see above */
13238 udelay(2); /* do not busy-wait the CSR */
13239 reg = read_csr(dd, RCV_STATUS);
13240 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13241 break;
13242
13243 /* give up after 100us - slowest possible at 33MHz is 73us */
13244 if (count++ > 50) {
13245 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013246 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13247 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013248 break;
13249 }
13250 }
13251}
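
/*
 * Sanity check of the 1 ms budget used above (worked numbers only):
 *
 *	148 KiB / (0.66 * 250 MB/s) = 151552 / 165000000 s ~= 919 us
 *
 * while the loop allows up to 500 iterations of udelay(2), i.e. at least
 * 1000 us before the "continuing" message is printed.
 */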
13252
13253/* set RXE CSRs to chip reset defaults */
13254static void reset_rxe_csrs(struct hfi1_devdata *dd)
13255{
13256 int i, j;
13257
13258 /*
13259 * RXE Kernel CSRs
13260 */
13261 write_csr(dd, RCV_CTRL, 0);
13262 init_rbufs(dd);
13263 /* RCV_STATUS read-only */
13264 /* RCV_CONTEXTS read-only */
13265 /* RCV_ARRAY_CNT read-only */
13266 /* RCV_BUF_SIZE read-only */
13267 write_csr(dd, RCV_BTH_QP, 0);
13268 write_csr(dd, RCV_MULTICAST, 0);
13269 write_csr(dd, RCV_BYPASS, 0);
13270 write_csr(dd, RCV_VL15, 0);
13271 /* this is a clear-down */
13272 write_csr(dd, RCV_ERR_INFO,
Jubin John17fb4f22016-02-14 20:21:52 -080013273 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013274 /* RCV_ERR_STATUS read-only */
13275 write_csr(dd, RCV_ERR_MASK, 0);
13276 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13277 /* RCV_ERR_FORCE leave alone */
13278 for (i = 0; i < 32; i++)
13279 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13280 for (i = 0; i < 4; i++)
13281 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13282 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13283 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13284 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13285 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13286 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13287 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13288 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13289 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13290 }
13291 for (i = 0; i < 32; i++)
13292 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13293
13294 /*
13295 * RXE Kernel and User Per-Context CSRs
13296 */
13297 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13298 /* kernel */
13299 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13300 /* RCV_CTXT_STATUS read-only */
13301 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13302 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13303 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13304 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13305 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13306 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13307 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13308 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13309 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13310 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13311
13312 /* user */
13313 /* RCV_HDR_TAIL read-only */
13314 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13315 /* RCV_EGR_INDEX_TAIL read-only */
13316 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13317 /* RCV_EGR_OFFSET_TAIL read-only */
13318 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
Jubin John17fb4f22016-02-14 20:21:52 -080013319 write_uctxt_csr(dd, i,
13320 RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013321 }
13322 }
13323}
13324
13325/*
13326 * Set sc2vl tables.
13327 *
13328 * They power on to zeros, so to avoid send context errors
13329 * they need to be set:
13330 *
13331 * SC 0-7 -> VL 0-7 (respectively)
13332 * SC 15 -> VL 15
13333 * otherwise
13334 * -> VL 0
13335 */
13336static void init_sc2vl_tables(struct hfi1_devdata *dd)
13337{
13338 int i;
13339 /* init per architecture spec, constrained by hardware capability */
13340
13341 /* HFI maps sent packets */
13342 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13343 0,
13344 0, 0, 1, 1,
13345 2, 2, 3, 3,
13346 4, 4, 5, 5,
13347 6, 6, 7, 7));
13348 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13349 1,
13350 8, 0, 9, 0,
13351 10, 0, 11, 0,
13352 12, 0, 13, 0,
13353 14, 0, 15, 15));
13354 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13355 2,
13356 16, 0, 17, 0,
13357 18, 0, 19, 0,
13358 20, 0, 21, 0,
13359 22, 0, 23, 0));
13360 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13361 3,
13362 24, 0, 25, 0,
13363 26, 0, 27, 0,
13364 28, 0, 29, 0,
13365 30, 0, 31, 0));
13366
13367 /* DC maps received packets */
13368 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13369 15_0,
13370 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13371 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13372 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13373 31_16,
13374 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13375 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13376
13377 /* initialize the cached sc2vl values consistently with h/w */
13378 for (i = 0; i < 32; i++) {
13379 if (i < 8 || i == 15)
13380 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13381 else
13382 *((u8 *)(dd->sc2vl) + i) = 0;
13383 }
13384}
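
/*
 * The cached table built by the loop above ends up, one byte per SC, as:
 *
 *	sc2vl[0..7] = 0..7,  sc2vl[15] = 15,  all other entries = 0
 *
 * which mirrors the SendSC2VLT and DCC table programming earlier in this
 * function.
 */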
13385
13386/*
13387 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13388 * depend on the chip going through a power-on reset - a driver may be loaded
13389 * and unloaded many times.
13390 *
13391 * Do not write any CSR values to the chip in this routine - there may be
13392 * a reset following the (possible) FLR in this routine.
13393 *
13394 */
13395static void init_chip(struct hfi1_devdata *dd)
13396{
13397 int i;
13398
13399 /*
13400 * Put the HFI CSRs in a known state.
13401 * Combine this with a DC reset.
13402 *
13403 * Stop the device from doing anything while we do a
13404 * reset. We know there are no other active users of
13405 * the device since we are now in charge. Turn off
 13406	 * all outbound and inbound traffic and make sure
13407 * the device does not generate any interrupts.
13408 */
13409
13410 /* disable send contexts and SDMA engines */
13411 write_csr(dd, SEND_CTRL, 0);
13412 for (i = 0; i < dd->chip_send_contexts; i++)
13413 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13414 for (i = 0; i < dd->chip_sdma_engines; i++)
13415 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13416 /* disable port (turn off RXE inbound traffic) and contexts */
13417 write_csr(dd, RCV_CTRL, 0);
13418 for (i = 0; i < dd->chip_rcv_contexts; i++)
13419 write_csr(dd, RCV_CTXT_CTRL, 0);
13420 /* mask all interrupt sources */
13421 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013422 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013423
13424 /*
13425 * DC Reset: do a full DC reset before the register clear.
13426 * A recommended length of time to hold is one CSR read,
13427 * so reread the CceDcCtrl. Then, hold the DC in reset
13428 * across the clear.
13429 */
13430 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013431 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013432
13433 if (use_flr) {
13434 /*
13435 * A FLR will reset the SPC core and part of the PCIe.
13436 * The parts that need to be restored have already been
13437 * saved.
13438 */
13439 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13440
13441 /* do the FLR, the DC reset will remain */
13442 hfi1_pcie_flr(dd);
13443
13444 /* restore command and BARs */
13445 restore_pci_variables(dd);
13446
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013447 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013448 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13449 hfi1_pcie_flr(dd);
13450 restore_pci_variables(dd);
13451 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013452 } else {
13453 dd_dev_info(dd, "Resetting CSRs with writes\n");
13454 reset_cce_csrs(dd);
13455 reset_txe_csrs(dd);
13456 reset_rxe_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013457 reset_misc_csrs(dd);
13458 }
13459 /* clear the DC reset */
13460 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013461
Mike Marciniszyn77241052015-07-30 15:17:43 -040013462 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013463 setextled(dd, 0);
13464
Mike Marciniszyn77241052015-07-30 15:17:43 -040013465 /*
13466 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013467 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013468	 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, keeping
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013469	 * anything that is plugged in constantly in reset, if it pays
Mike Marciniszyn77241052015-07-30 15:17:43 -040013470	 * attention to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013471 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013472 * I2CCLK and I2CDAT will change per direction, and INT_N and
13473 * MODPRS_N are input only and their value is ignored.
13474 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013475 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13476 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Dean Luicka2ee27a2016-03-05 08:49:50 -080013477 init_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013478}
13479
13480static void init_early_variables(struct hfi1_devdata *dd)
13481{
13482 int i;
13483
13484 /* assign link credit variables */
13485 dd->vau = CM_VAU;
13486 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013487 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013488 dd->link_credits--;
13489 dd->vcu = cu_to_vcu(hfi1_cu);
13490 /* enough room for 8 MAD packets plus header - 17K */
13491 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13492 if (dd->vl15_init > dd->link_credits)
13493 dd->vl15_init = dd->link_credits;
13494
13495 write_uninitialized_csrs_and_memories(dd);
13496
13497 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13498 for (i = 0; i < dd->num_pports; i++) {
13499 struct hfi1_pportdata *ppd = &dd->pport[i];
13500
13501 set_partition_keys(ppd);
13502 }
13503 init_sc2vl_tables(dd);
13504}
13505
13506static void init_kdeth_qp(struct hfi1_devdata *dd)
13507{
13508 /* user changed the KDETH_QP */
13509 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13510 /* out of range or illegal value */
13511 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13512 kdeth_qp = 0;
13513 }
13514 if (kdeth_qp == 0) /* not set, or failed range check */
13515 kdeth_qp = DEFAULT_KDETH_QP;
13516
13517 write_csr(dd, SEND_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013518 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13519 SEND_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013520
13521 write_csr(dd, RCV_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013522 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13523 RCV_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013524}
13525
13526/**
13527 * init_qpmap_table
13528 * @dd - device data
13529 * @first_ctxt - first context
 13530	 * @last_ctxt - last context
13531 *
 13532	 * This routine sets the qpn mapping table that
13533 * is indexed by qpn[8:1].
13534 *
13535 * The routine will round robin the 256 settings
13536 * from first_ctxt to last_ctxt.
13537 *
13538 * The first/last looks ahead to having specialized
13539 * receive contexts for mgmt and bypass. Normal
 13540	 * verbs traffic is assumed to be on a range
13541 * of receive contexts.
13542 */
13543static void init_qpmap_table(struct hfi1_devdata *dd,
13544 u32 first_ctxt,
13545 u32 last_ctxt)
13546{
13547 u64 reg = 0;
13548 u64 regno = RCV_QP_MAP_TABLE;
13549 int i;
13550 u64 ctxt = first_ctxt;
13551
Dean Luick60d585ad2016-04-12 10:50:35 -070013552 for (i = 0; i < 256; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013553 reg |= ctxt << (8 * (i % 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -040013554 ctxt++;
13555 if (ctxt > last_ctxt)
13556 ctxt = first_ctxt;
Dean Luick60d585ad2016-04-12 10:50:35 -070013557 if (i % 8 == 7) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013558 write_csr(dd, regno, reg);
13559 reg = 0;
13560 regno += 8;
13561 }
13562 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013563
13564 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13565 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13566}
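
/*
 * Worked example for the loop above (illustrative values): with
 * first_ctxt = 2 and last_ctxt = 4, the 256 entries cycle
 * 2, 3, 4, 2, 3, 4, ... and every eight entries are packed into one
 * 64-bit RcvQPMapTable register (32 registers total), one byte per
 * qpn[8:1] value.
 */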
13567
Dean Luick372cc85a2016-04-12 11:30:51 -070013568struct rsm_map_table {
13569 u64 map[NUM_MAP_REGS];
13570 unsigned int used;
13571};
13572
Dean Luickb12349a2016-04-12 11:31:33 -070013573struct rsm_rule_data {
13574 u8 offset;
13575 u8 pkt_type;
13576 u32 field1_off;
13577 u32 field2_off;
13578 u32 index1_off;
13579 u32 index1_width;
13580 u32 index2_off;
13581 u32 index2_width;
13582 u32 mask1;
13583 u32 value1;
13584 u32 mask2;
13585 u32 value2;
13586};
13587
Dean Luick372cc85a2016-04-12 11:30:51 -070013588/*
13589 * Return an initialized RMT map table for users to fill in. OK if it
13590 * returns NULL, indicating no table.
13591 */
13592static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13593{
13594 struct rsm_map_table *rmt;
13595 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13596
13597 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13598 if (rmt) {
13599 memset(rmt->map, rxcontext, sizeof(rmt->map));
13600 rmt->used = 0;
13601 }
13602
13603 return rmt;
13604}
13605
13606/*
 13607	 * Write the final RMT map table to the chip. OK if table is NULL.
 13608	 * The table itself is not freed here.
13609 */
13610static void complete_rsm_map_table(struct hfi1_devdata *dd,
13611 struct rsm_map_table *rmt)
13612{
13613 int i;
13614
13615 if (rmt) {
13616 /* write table to chip */
13617 for (i = 0; i < NUM_MAP_REGS; i++)
13618 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13619
13620 /* enable RSM */
13621 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13622 }
13623}
13624
Dean Luickb12349a2016-04-12 11:31:33 -070013625/*
13626 * Add a receive side mapping rule.
13627 */
13628static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13629 struct rsm_rule_data *rrd)
13630{
13631 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13632 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13633 1ull << rule_index | /* enable bit */
13634 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13635 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13636 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13637 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13638 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13639 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13640 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13641 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13642 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13643 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13644 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13645 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13646 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13647}
13648
Dean Luick4a818be2016-04-12 11:31:11 -070013649/* return the number of RSM map table entries that will be used for QOS */
13650static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13651 unsigned int *np)
13652{
13653 int i;
13654 unsigned int m, n;
13655 u8 max_by_vl = 0;
13656
13657 /* is QOS active at all? */
13658 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13659 num_vls == 1 ||
13660 krcvqsset <= 1)
13661 goto no_qos;
13662
13663 /* determine bits for qpn */
13664 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13665 if (krcvqs[i] > max_by_vl)
13666 max_by_vl = krcvqs[i];
13667 if (max_by_vl > 32)
13668 goto no_qos;
13669 m = ilog2(__roundup_pow_of_two(max_by_vl));
13670
13671 /* determine bits for vl */
13672 n = ilog2(__roundup_pow_of_two(num_vls));
13673
13674 /* reject if too much is used */
13675 if ((m + n) > 7)
13676 goto no_qos;
13677
13678 if (mp)
13679 *mp = m;
13680 if (np)
13681 *np = n;
13682
13683 return 1 << (m + n);
13684
13685no_qos:
13686 if (mp)
13687 *mp = 0;
13688 if (np)
13689 *np = 0;
13690 return 0;
13691}
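
/*
 * Worked example for the sizing above (hypothetical module parameters):
 * with krcvqs = { 4, 4 } and num_vls = 2,
 *
 *	max_by_vl = 4  ->  m = ilog2(__roundup_pow_of_two(4)) = 2
 *	num_vls   = 2  ->  n = ilog2(__roundup_pow_of_two(2)) = 1
 *	m + n = 3 <= 7 ->  1 << (m + n) = 8 RSM map table entries
 */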
13692
Mike Marciniszyn77241052015-07-30 15:17:43 -040013693/**
13694 * init_qos - init RX qos
13695 * @dd - device data
Dean Luick372cc85a2016-04-12 11:30:51 -070013696 * @rmt - RSM map table
Mike Marciniszyn77241052015-07-30 15:17:43 -040013697 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013698 * This routine initializes Rule 0 and the RSM map table to implement
13699 * quality of service (qos).
Mike Marciniszyn77241052015-07-30 15:17:43 -040013700 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013701 * If all of the limit tests succeed, qos is applied based on the array
13702 * interpretation of krcvqs where entry 0 is VL0.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013703 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013704 * The number of vl bits (n) and the number of qpn bits (m) are computed to
13705 * feed both the RSM map table and the single rule.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013706 */
Dean Luick372cc85a2016-04-12 11:30:51 -070013707static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013708{
Dean Luickb12349a2016-04-12 11:31:33 -070013709 struct rsm_rule_data rrd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013710 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
Dean Luick372cc85a2016-04-12 11:30:51 -070013711 unsigned int rmt_entries;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013712 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013713
Dean Luick4a818be2016-04-12 11:31:11 -070013714 if (!rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013715 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013716 rmt_entries = qos_rmt_entries(dd, &m, &n);
13717 if (rmt_entries == 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013718 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013719 qpns_per_vl = 1 << m;
13720
Dean Luick372cc85a2016-04-12 11:30:51 -070013721 /* enough room in the map table? */
13722 rmt_entries = 1 << (m + n);
13723 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013724 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013725
Dean Luick372cc85a2016-04-12 11:30:51 -070013726	/* add qos entries to the RSM map table */
Dean Luick33a9eb52016-04-12 10:50:22 -070013727 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013728 unsigned tctxt;
13729
13730 for (qpn = 0, tctxt = ctxt;
13731 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13732 unsigned idx, regoff, regidx;
13733
Dean Luick372cc85a2016-04-12 11:30:51 -070013734 /* generate the index the hardware will produce */
13735 idx = rmt->used + ((qpn << n) ^ i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013736 regoff = (idx % 8) * 8;
13737 regidx = idx / 8;
Dean Luick372cc85a2016-04-12 11:30:51 -070013738 /* replace default with context number */
13739 reg = rmt->map[regidx];
Mike Marciniszyn77241052015-07-30 15:17:43 -040013740 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13741 << regoff);
13742 reg |= (u64)(tctxt++) << regoff;
Dean Luick372cc85a2016-04-12 11:30:51 -070013743 rmt->map[regidx] = reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013744 if (tctxt == ctxt + krcvqs[i])
13745 tctxt = ctxt;
13746 }
13747 ctxt += krcvqs[i];
13748 }
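	/*
	 * Continuing the illustrative example (num_vls = 2, krcvqs = {2, 2},
	 * m = n = 1) and assuming rmt->used == 0: qpns_per_vl = 2 and
	 * idx = rmt->used + ((qpn << n) ^ i) gives
	 *   VL0: qpn 0 -> entry 0, qpn 1 -> entry 2
	 *   VL1: qpn 0 -> entry 1, qpn 1 -> entry 3
	 * so the VL selects the low n bit(s) of the table index and the qpn
	 * bits sit above it, interleaving the per-VL context groups.
	 */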
Dean Luickb12349a2016-04-12 11:31:33 -070013749
13750 rrd.offset = rmt->used;
13751 rrd.pkt_type = 2;
13752 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13753 rrd.field2_off = LRH_SC_MATCH_OFFSET;
13754 rrd.index1_off = LRH_SC_SELECT_OFFSET;
13755 rrd.index1_width = n;
13756 rrd.index2_off = QPN_SELECT_OFFSET;
13757 rrd.index2_width = m + n;
13758 rrd.mask1 = LRH_BTH_MASK;
13759 rrd.value1 = LRH_BTH_VALUE;
13760 rrd.mask2 = LRH_SC_MASK;
13761 rrd.value2 = LRH_SC_VALUE;
13762
13763 /* add rule 0 */
13764 add_rsm_rule(dd, 0, &rrd);
13765
Dean Luick372cc85a2016-04-12 11:30:51 -070013766 /* mark RSM map entries as used */
13767 rmt->used += rmt_entries;
Dean Luick33a9eb52016-04-12 10:50:22 -070013768 /* map everything else to the mcast/err/vl15 context */
13769 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013770 dd->qos_shift = n + 1;
13771 return;
13772bail:
13773 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013774 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013775}
13776
Dean Luick8f000f72016-04-12 11:32:06 -070013777static void init_user_fecn_handling(struct hfi1_devdata *dd,
13778 struct rsm_map_table *rmt)
13779{
13780 struct rsm_rule_data rrd;
13781 u64 reg;
13782 int i, idx, regoff, regidx;
13783 u8 offset;
13784
13785 /* there needs to be enough room in the map table */
13786 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13787 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13788 return;
13789 }
13790
13791 /*
13792 * RSM will extract the destination context as an index into the
13793 * map table. The destination contexts are a sequential block
13794 * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
13795 * Map entries are accessed as offset + extracted value. Adjust
13796 * the added offset so this sequence can be placed anywhere in
13797 * the table - as long as the entries themselves do not wrap.
13798 * There are only enough bits in offset for the table size, so
13799 * start with that to allow for a "negative" offset.
13800 */
13801 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13802 (int)dd->first_user_ctxt);
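	/*
	 * Worked example (illustrative numbers, assuming NUM_MAP_ENTRIES is
	 * 256 so the index space is 8 bits wide): with rmt->used == 20 and
	 * first_user_ctxt == 24, offset = (u8)(256 + 20 - 24) = 252.  The
	 * extracted context is added modulo the table size, so context 24
	 * lands at (252 + 24) % 256 = 20, exactly at rmt->used, giving an
	 * effective offset of -4 even though the field itself is unsigned.
	 */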
13803
13804 for (i = dd->first_user_ctxt, idx = rmt->used;
13805 i < dd->num_rcv_contexts; i++, idx++) {
13806 /* replace with identity mapping */
13807 regoff = (idx % 8) * 8;
13808 regidx = idx / 8;
13809 reg = rmt->map[regidx];
13810 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13811 reg |= (u64)i << regoff;
13812 rmt->map[regidx] = reg;
13813 }
13814
13815 /*
13816 * For RSM intercept of Expected FECN packets:
13817 * o packet type 0 - expected
13818 * o match on F (bit 95), using select/match 1, and
13819 * o match on SH (bit 133), using select/match 2.
13820 *
13821 * Use index 1 to extract the 8-bit receive context from DestQP
13822 * (start at bit 64). Use that as the RSM map table index.
13823 */
13824 rrd.offset = offset;
13825 rrd.pkt_type = 0;
13826 rrd.field1_off = 95;
13827 rrd.field2_off = 133;
13828 rrd.index1_off = 64;
13829 rrd.index1_width = 8;
13830 rrd.index2_off = 0;
13831 rrd.index2_width = 0;
13832 rrd.mask1 = 1;
13833 rrd.value1 = 1;
13834 rrd.mask2 = 1;
13835 rrd.value2 = 1;
13836
13837 /* add rule 1 */
13838 add_rsm_rule(dd, 1, &rrd);
13839
13840 rmt->used += dd->num_user_contexts;
13841}
13842
Mike Marciniszyn77241052015-07-30 15:17:43 -040013843static void init_rxe(struct hfi1_devdata *dd)
13844{
Dean Luick372cc85a2016-04-12 11:30:51 -070013845 struct rsm_map_table *rmt;
13846
Mike Marciniszyn77241052015-07-30 15:17:43 -040013847 /* enable all receive errors */
13848 write_csr(dd, RCV_ERR_MASK, ~0ull);
Dean Luick372cc85a2016-04-12 11:30:51 -070013849
13850 rmt = alloc_rsm_map_table(dd);
13851 /* set up QOS, including the QPN map table */
13852 init_qos(dd, rmt);
Dean Luick8f000f72016-04-12 11:32:06 -070013853 init_user_fecn_handling(dd, rmt);
Dean Luick372cc85a2016-04-12 11:30:51 -070013854 complete_rsm_map_table(dd, rmt);
13855 kfree(rmt);
13856
Mike Marciniszyn77241052015-07-30 15:17:43 -040013857 /*
13858 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13859 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13860 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13861 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13862	 * Max_Payload_Size set to its minimum of 128.
13863 *
13864 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13865 * (64 bytes). Max_Payload_Size is possibly modified upward in
13866 * tune_pcie_caps() which is called after this routine.
13867 */
13868}
13869
13870static void init_other(struct hfi1_devdata *dd)
13871{
13872 /* enable all CCE errors */
13873 write_csr(dd, CCE_ERR_MASK, ~0ull);
13874 /* enable *some* Misc errors */
13875 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13876 /* enable all DC errors, except LCB */
13877 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13878 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13879}
13880
13881/*
13882 * Fill out the given AU table using the given CU. A CU is defined in terms
13883 * of AUs. The table is an encoding: given the index, how many AUs does that
13884 * represent?
13885 *
13886 * NOTE: Assumes that the register layout is the same for the
13887 * local and remote tables.
13888 */
13889static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13890 u32 csr0to3, u32 csr4to7)
13891{
13892 write_csr(dd, csr0to3,
Jubin John17fb4f22016-02-14 20:21:52 -080013893 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13894 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13895 2ull * cu <<
13896 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13897 4ull * cu <<
13898 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013899 write_csr(dd, csr4to7,
Jubin John17fb4f22016-02-14 20:21:52 -080013900 8ull * cu <<
13901 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13902 16ull * cu <<
13903 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13904 32ull * cu <<
13905 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13906 64ull * cu <<
13907 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013908}
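/*
 * Example derived from the shifts above (values are illustrative):
 * with cu == 1 the AU table encodes {0, 1, 2, 4, 8, 16, 32, 64} AUs
 * for indices 0..7; doubling cu doubles entries 2..7 while entries
 * 0 and 1 stay fixed at 0 and 1 AU.
 */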
13909
13910static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13911{
13912 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080013913 SEND_CM_LOCAL_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013914}
13915
13916void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13917{
13918 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080013919 SEND_CM_REMOTE_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013920}
13921
13922static void init_txe(struct hfi1_devdata *dd)
13923{
13924 int i;
13925
13926 /* enable all PIO, SDMA, general, and Egress errors */
13927 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13928 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13929 write_csr(dd, SEND_ERR_MASK, ~0ull);
13930 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13931
13932 /* enable all per-context and per-SDMA engine errors */
13933 for (i = 0; i < dd->chip_send_contexts; i++)
13934 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13935 for (i = 0; i < dd->chip_sdma_engines; i++)
13936 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13937
13938 /* set the local CU to AU mapping */
13939 assign_local_cm_au_table(dd, dd->vcu);
13940
13941 /*
13942 * Set reasonable default for Credit Return Timer
13943 * Don't set on Simulator - causes it to choke.
13944 */
13945 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13946 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13947}
13948
13949int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13950{
13951 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13952 unsigned sctxt;
13953 int ret = 0;
13954 u64 reg;
13955
13956 if (!rcd || !rcd->sc) {
13957 ret = -EINVAL;
13958 goto done;
13959 }
13960 sctxt = rcd->sc->hw_context;
13961 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13962 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13963 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13964 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13965 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13966 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13967 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13968 /*
13969 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040013970 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013971 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013972 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13973 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13974 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13975 }
13976
13977 /* Enable J_KEY check on receive context. */
13978 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13979 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13980 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13981 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13982done:
13983 return ret;
13984}
13985
13986int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13987{
13988 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13989 unsigned sctxt;
13990 int ret = 0;
13991 u64 reg;
13992
13993 if (!rcd || !rcd->sc) {
13994 ret = -EINVAL;
13995 goto done;
13996 }
13997 sctxt = rcd->sc->hw_context;
13998 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13999 /*
14000 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14001 * This check would not have been enabled for A0 h/w, see
14002	 * hfi1_set_ctxt_jkey().
14003 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014004 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014005 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14006 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14007 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14008 }
14009 /* Turn off the J_KEY on the receive side */
14010 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14011done:
14012 return ret;
14013}
14014
14015int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14016{
14017 struct hfi1_ctxtdata *rcd;
14018 unsigned sctxt;
14019 int ret = 0;
14020 u64 reg;
14021
Jubin Johne4909742016-02-14 20:22:00 -080014022 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014023 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014024 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014025 ret = -EINVAL;
14026 goto done;
14027 }
14028 if (!rcd || !rcd->sc) {
14029 ret = -EINVAL;
14030 goto done;
14031 }
14032 sctxt = rcd->sc->hw_context;
14033 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14034 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14035 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14036 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14037 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Sebastian Sancheze38d1e42016-04-12 11:22:21 -070014038 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014039 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14040done:
14041 return ret;
14042}
14043
14044int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14045{
14046 struct hfi1_ctxtdata *rcd;
14047 unsigned sctxt;
14048 int ret = 0;
14049 u64 reg;
14050
Jubin Johne4909742016-02-14 20:22:00 -080014051 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014052 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014053 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014054 ret = -EINVAL;
14055 goto done;
14056 }
14057 if (!rcd || !rcd->sc) {
14058 ret = -EINVAL;
14059 goto done;
14060 }
14061 sctxt = rcd->sc->hw_context;
14062 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14063 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14064 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14065 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14066done:
14067 return ret;
14068}
14069
14070/*
14071 * Start doing the clean up of the chip. Our clean up happens in multiple
14072 * stages and this is just the first.
14073 */
14074void hfi1_start_cleanup(struct hfi1_devdata *dd)
14075{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080014076 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014077 free_cntrs(dd);
14078 free_rcverr(dd);
14079 clean_up_interrupts(dd);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014080 finish_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014081}
14082
14083#define HFI_BASE_GUID(dev) \
14084 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14085
14086/*
Dean Luick78eb1292016-03-05 08:49:45 -080014087 * Information can be shared between the two HFIs on the same ASIC
14088 * in the same OS. This function finds the peer device and sets
14089 * up a shared structure.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014090 */
Dean Luick78eb1292016-03-05 08:49:45 -080014091static int init_asic_data(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014092{
14093 unsigned long flags;
14094 struct hfi1_devdata *tmp, *peer = NULL;
Dean Luick78eb1292016-03-05 08:49:45 -080014095 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014096
14097 spin_lock_irqsave(&hfi1_devs_lock, flags);
14098 /* Find our peer device */
14099 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14100 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14101 dd->unit != tmp->unit) {
14102 peer = tmp;
14103 break;
14104 }
14105 }
14106
Dean Luick78eb1292016-03-05 08:49:45 -080014107 if (peer) {
14108 dd->asic_data = peer->asic_data;
14109 } else {
14110 dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14111 if (!dd->asic_data) {
14112 ret = -ENOMEM;
14113 goto done;
14114 }
14115 mutex_init(&dd->asic_data->asic_resource_mutex);
14116 }
14117 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14118
14119done:
Mike Marciniszyn77241052015-07-30 15:17:43 -040014120 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
Dean Luick78eb1292016-03-05 08:49:45 -080014121 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014122}
14123
Dean Luick5d9157a2015-11-16 21:59:34 -050014124/*
14125 * Set dd->boardname. Use a generic name if a name is not returned from
14126 * EFI variable space.
14127 *
14128 * Return 0 on success, -ENOMEM if space could not be allocated.
14129 */
14130static int obtain_boardname(struct hfi1_devdata *dd)
14131{
14132 /* generic board description */
14133 const char generic[] =
14134 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14135 unsigned long size;
14136 int ret;
14137
14138 ret = read_hfi1_efi_var(dd, "description", &size,
14139 (void **)&dd->boardname);
14140 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014141 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014142 /* use generic description */
14143 dd->boardname = kstrdup(generic, GFP_KERNEL);
14144 if (!dd->boardname)
14145 return -ENOMEM;
14146 }
14147 return 0;
14148}
14149
Kaike Wan24487dd2016-02-26 13:33:23 -080014150/*
14151 * Check the interrupt registers to make sure that they are mapped correctly.
14152 * It is intended to help the user identify any mismapping by the VMM when
14153 * the driver is running in a VM. This function should only be called before
14154 * interrupts are set up.
14155 *
14156 * Return 0 on success, -EINVAL on failure.
14157 */
14158static int check_int_registers(struct hfi1_devdata *dd)
14159{
14160 u64 reg;
14161 u64 all_bits = ~(u64)0;
14162 u64 mask;
14163
14164 /* Clear CceIntMask[0] to avoid raising any interrupts */
14165 mask = read_csr(dd, CCE_INT_MASK);
14166 write_csr(dd, CCE_INT_MASK, 0ull);
14167 reg = read_csr(dd, CCE_INT_MASK);
14168 if (reg)
14169 goto err_exit;
14170
14171 /* Clear all interrupt status bits */
14172 write_csr(dd, CCE_INT_CLEAR, all_bits);
14173 reg = read_csr(dd, CCE_INT_STATUS);
14174 if (reg)
14175 goto err_exit;
14176
14177 /* Set all interrupt status bits */
14178 write_csr(dd, CCE_INT_FORCE, all_bits);
14179 reg = read_csr(dd, CCE_INT_STATUS);
14180 if (reg != all_bits)
14181 goto err_exit;
14182
14183 /* Restore the interrupt mask */
14184 write_csr(dd, CCE_INT_CLEAR, all_bits);
14185 write_csr(dd, CCE_INT_MASK, mask);
14186
14187 return 0;
14188err_exit:
14189 write_csr(dd, CCE_INT_MASK, mask);
14190 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14191 return -EINVAL;
14192}
14193
Mike Marciniszyn77241052015-07-30 15:17:43 -040014194/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014195 * hfi1_init_dd - Allocate and initialize the device structure for the hfi.
14196 * @pdev: the pci_dev for hfi1_ib device
14197 * @ent: pci_device_id struct for this dev
14198 *
14199 * Also allocates, initializes, and returns the devdata struct for this
14200 * device instance
14201 *
14202 * This is global, and is called directly at init to set up the
14203 * chip-specific function pointers for later use.
14204 */
14205struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14206 const struct pci_device_id *ent)
14207{
14208 struct hfi1_devdata *dd;
14209 struct hfi1_pportdata *ppd;
14210 u64 reg;
14211 int i, ret;
14212 static const char * const inames[] = { /* implementation names */
14213 "RTL silicon",
14214 "RTL VCS simulation",
14215 "RTL FPGA emulation",
14216 "Functional simulator"
14217 };
Kaike Wan24487dd2016-02-26 13:33:23 -080014218 struct pci_dev *parent = pdev->bus->self;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014219
Jubin John17fb4f22016-02-14 20:21:52 -080014220 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14221 sizeof(struct hfi1_pportdata));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014222 if (IS_ERR(dd))
14223 goto bail;
14224 ppd = dd->pport;
14225 for (i = 0; i < dd->num_pports; i++, ppd++) {
14226 int vl;
14227 /* init common fields */
14228 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14229 /* DC supports 4 link widths */
14230 ppd->link_width_supported =
14231 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14232 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14233 ppd->link_width_downgrade_supported =
14234 ppd->link_width_supported;
14235 /* start out enabling only 4X */
14236 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14237 ppd->link_width_downgrade_enabled =
14238 ppd->link_width_downgrade_supported;
14239 /* link width active is 0 when link is down */
14240 /* link width downgrade active is 0 when link is down */
14241
Jubin Johnd0d236e2016-02-14 20:20:15 -080014242 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14243 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014244 hfi1_early_err(&pdev->dev,
14245 "Invalid num_vls %u, using %u VLs\n",
14246 num_vls, HFI1_MAX_VLS_SUPPORTED);
14247 num_vls = HFI1_MAX_VLS_SUPPORTED;
14248 }
14249 ppd->vls_supported = num_vls;
14250 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014251 ppd->actual_vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014252 /* Set the default MTU. */
14253 for (vl = 0; vl < num_vls; vl++)
14254 dd->vld[vl].mtu = hfi1_max_mtu;
14255 dd->vld[15].mtu = MAX_MAD_PACKET;
14256 /*
14257		 * Set the initial values to reasonable defaults; they will be set
14258		 * for real when the link is up.
14259 */
14260 ppd->lstate = IB_PORT_DOWN;
14261 ppd->overrun_threshold = 0x4;
14262 ppd->phy_error_threshold = 0xf;
14263 ppd->port_crc_mode_enabled = link_crc_mask;
14264 /* initialize supported LTP CRC mode */
14265 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14266 /* initialize enabled LTP CRC mode */
14267 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14268 /* start in offline */
14269 ppd->host_link_state = HLS_DN_OFFLINE;
14270 init_vl_arb_caches(ppd);
Dean Luickf45c8dc2016-02-03 14:35:31 -080014271 ppd->last_pstate = 0xff; /* invalid value */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014272 }
14273
14274 dd->link_default = HLS_DN_POLL;
14275
14276 /*
14277 * Do remaining PCIe setup and save PCIe values in dd.
14278 * Any error printing is already done by the init code.
14279 * On return, we have the chip mapped.
14280 */
14281 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14282 if (ret < 0)
14283 goto bail_free;
14284
14285 /* verify that reads actually work, save revision for reset check */
14286 dd->revision = read_csr(dd, CCE_REVISION);
14287 if (dd->revision == ~(u64)0) {
14288 dd_dev_err(dd, "cannot read chip CSRs\n");
14289 ret = -EINVAL;
14290 goto bail_cleanup;
14291 }
14292 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14293 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14294 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14295 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14296
Jubin John4d114fd2016-02-14 20:21:43 -080014297 /*
Kaike Wan24487dd2016-02-26 13:33:23 -080014298 * Check interrupt registers mapping if the driver has no access to
14299 * the upstream component. In this case, it is likely that the driver
14300 * is running in a VM.
14301 */
14302 if (!parent) {
14303 ret = check_int_registers(dd);
14304 if (ret)
14305 goto bail_cleanup;
14306 }
14307
14308 /*
Jubin John4d114fd2016-02-14 20:21:43 -080014309 * obtain the hardware ID - NOT related to unit, which is a
14310 * software enumeration
14311 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014312 reg = read_csr(dd, CCE_REVISION2);
14313 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14314 & CCE_REVISION2_HFI_ID_MASK;
14315 /* the variable size will remove unwanted bits */
14316 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14317 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14318 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080014319 dd->icode < ARRAY_SIZE(inames) ?
14320 inames[dd->icode] : "unknown", (int)dd->irev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014321
14322 /* speeds the hardware can support */
14323 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14324 /* speeds allowed to run at */
14325 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14326 /* give a reasonable active value, will be set on link up */
14327 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14328
14329 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14330 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14331 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14332 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14333 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14334 /* fix up link widths for emulation _p */
14335 ppd = dd->pport;
14336 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14337 ppd->link_width_supported =
14338 ppd->link_width_enabled =
14339 ppd->link_width_downgrade_supported =
14340 ppd->link_width_downgrade_enabled =
14341 OPA_LINK_WIDTH_1X;
14342 }
14343	/* ensure num_vls isn't larger than the number of sdma engines */
14344 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14345 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014346 num_vls, dd->chip_sdma_engines);
14347 num_vls = dd->chip_sdma_engines;
14348 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014349 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014350 }
14351
14352 /*
14353 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14354 * Limit the max if larger than the field holds. If timeout is
14355 * non-zero, then the calculated field will be at least 1.
14356 *
14357 * Must be after icode is set up - the cclock rate depends
14358 * on knowing the hardware being used.
14359 */
14360 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14361 if (dd->rcv_intr_timeout_csr >
14362 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14363 dd->rcv_intr_timeout_csr =
14364 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14365 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14366 dd->rcv_intr_timeout_csr = 1;
14367
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014368 /* needs to be done before we look for the peer device */
14369 read_guid(dd);
14370
Dean Luick78eb1292016-03-05 08:49:45 -080014371 /* set up shared ASIC data with peer device */
14372 ret = init_asic_data(dd);
14373 if (ret)
14374 goto bail_cleanup;
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014375
Mike Marciniszyn77241052015-07-30 15:17:43 -040014376 /* obtain chip sizes, reset chip CSRs */
14377 init_chip(dd);
14378
14379 /* read in the PCIe link speed information */
14380 ret = pcie_speeds(dd);
14381 if (ret)
14382 goto bail_cleanup;
14383
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014384 /* Needs to be called before hfi1_firmware_init */
14385 get_platform_config(dd);
14386
Mike Marciniszyn77241052015-07-30 15:17:43 -040014387 /* read in firmware */
14388 ret = hfi1_firmware_init(dd);
14389 if (ret)
14390 goto bail_cleanup;
14391
14392 /*
14393 * In general, the PCIe Gen3 transition must occur after the
14394 * chip has been idled (so it won't initiate any PCIe transactions
14395 * e.g. an interrupt) and before the driver changes any registers
14396 * (the transition will reset the registers).
14397 *
14398 * In particular, place this call after:
14399 * - init_chip() - the chip will not initiate any PCIe transactions
14400 * - pcie_speeds() - reads the current link speed
14401 * - hfi1_firmware_init() - the needed firmware is ready to be
14402 * downloaded
14403 */
14404 ret = do_pcie_gen3_transition(dd);
14405 if (ret)
14406 goto bail_cleanup;
14407
14408 /* start setting dd values and adjusting CSRs */
14409 init_early_variables(dd);
14410
14411 parse_platform_config(dd);
14412
Dean Luick5d9157a2015-11-16 21:59:34 -050014413 ret = obtain_boardname(dd);
14414 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014415 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014416
14417 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014418 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014419 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014420 (u32)dd->majrev,
14421 (u32)dd->minrev,
14422 (dd->revision >> CCE_REVISION_SW_SHIFT)
14423 & CCE_REVISION_SW_MASK);
14424
Jubin John0852d242016-04-12 11:30:08 -070014425 /*
14426 * The real cpu mask is part of the affinity struct but has to be
14427 * initialized earlier than the rest of the affinity struct because it
14428 * is needed to calculate the number of user contexts in
14429 * set_up_context_variables(). However, hfi1_dev_affinity_init(),
14430 * which initializes the rest of the affinity struct members,
14431 * depends on set_up_context_variables() for the number of kernel
14432 * contexts, so it cannot be called before set_up_context_variables().
14433 */
14434 ret = init_real_cpu_mask(dd);
14435 if (ret)
14436 goto bail_cleanup;
14437
Mike Marciniszyn77241052015-07-30 15:17:43 -040014438 ret = set_up_context_variables(dd);
14439 if (ret)
14440 goto bail_cleanup;
14441
14442 /* set initial RXE CSRs */
14443 init_rxe(dd);
14444 /* set initial TXE CSRs */
14445 init_txe(dd);
14446 /* set initial non-RXE, non-TXE CSRs */
14447 init_other(dd);
14448 /* set up KDETH QP prefix in both RX and TX CSRs */
14449 init_kdeth_qp(dd);
14450
Jubin John0852d242016-04-12 11:30:08 -070014451 hfi1_dev_affinity_init(dd);
Mitko Haralanov957558c2016-02-03 14:33:40 -080014452
Mike Marciniszyn77241052015-07-30 15:17:43 -040014453 /* send contexts must be set up before receive contexts */
14454 ret = init_send_contexts(dd);
14455 if (ret)
14456 goto bail_cleanup;
14457
14458 ret = hfi1_create_ctxts(dd);
14459 if (ret)
14460 goto bail_cleanup;
14461
14462 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14463 /*
14464 * rcd[0] is guaranteed to be valid by this point. Also, all
14465	 * contexts are using the same value, as per the module parameter.
14466 */
14467 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14468
14469 ret = init_pervl_scs(dd);
14470 if (ret)
14471 goto bail_cleanup;
14472
14473 /* sdma init */
14474 for (i = 0; i < dd->num_pports; ++i) {
14475 ret = sdma_init(dd, i);
14476 if (ret)
14477 goto bail_cleanup;
14478 }
14479
14480 /* use contexts created by hfi1_create_ctxts */
14481 ret = set_up_interrupts(dd);
14482 if (ret)
14483 goto bail_cleanup;
14484
14485 /* set up LCB access - must be after set_up_interrupts() */
14486 init_lcb_access(dd);
14487
14488 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14489 dd->base_guid & 0xFFFFFF);
14490
14491 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14492 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14493 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14494
14495 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14496 if (ret)
14497 goto bail_clear_intr;
14498 check_fabric_firmware_versions(dd);
14499
14500 thermal_init(dd);
14501
14502 ret = init_cntrs(dd);
14503 if (ret)
14504 goto bail_clear_intr;
14505
14506 ret = init_rcverr(dd);
14507 if (ret)
14508 goto bail_free_cntrs;
14509
14510 ret = eprom_init(dd);
14511 if (ret)
14512 goto bail_free_rcverr;
14513
14514 goto bail;
14515
14516bail_free_rcverr:
14517 free_rcverr(dd);
14518bail_free_cntrs:
14519 free_cntrs(dd);
14520bail_clear_intr:
14521 clean_up_interrupts(dd);
14522bail_cleanup:
14523 hfi1_pcie_ddcleanup(dd);
14524bail_free:
14525 hfi1_free_devdata(dd);
14526 dd = ERR_PTR(ret);
14527bail:
14528 return dd;
14529}
14530
14531static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14532 u32 dw_len)
14533{
14534 u32 delta_cycles;
14535 u32 current_egress_rate = ppd->current_egress_rate;
14536 /* rates here are in units of 10^6 bits/sec */
14537
14538 if (desired_egress_rate == -1)
14539 return 0; /* shouldn't happen */
14540
14541 if (desired_egress_rate >= current_egress_rate)
14542		return 0; /* we can't make it go faster, only slower */
14543
14544 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14545 egress_cycles(dw_len * 4, current_egress_rate);
14546
14547 return (u16)delta_cycles;
14548}
14549
Mike Marciniszyn77241052015-07-30 15:17:43 -040014550/**
14551 * create_pbc - build a pbc for transmission
14552 * @flags: special case flags OR'd into the built PBC
14553 * @srate_mbs: static rate, in Mb/s
14554 * @vl: vl
14555 * @dw_len: dword length (header words + data words + pbc words)
14556 *
14557 * Create a PBC with the given flags, rate, VL, and length.
14558 *
14559 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14560 * for verbs, which does not use this PSM feature. The lone other caller
14561 * is for the diagnostic interface which calls this if the user does not
14562 * supply their own PBC.
14563 */
14564u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14565 u32 dw_len)
14566{
14567 u64 pbc, delay = 0;
14568
14569 if (unlikely(srate_mbs))
14570 delay = delay_cycles(ppd, srate_mbs, dw_len);
14571
14572 pbc = flags
14573 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14574 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14575 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14576 | (dw_len & PBC_LENGTH_DWS_MASK)
14577 << PBC_LENGTH_DWS_SHIFT;
14578
14579 return pbc;
14580}
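/*
 * Illustrative use only (the caller and variable names here are
 * hypothetical): a sender with no special flags and no static rate
 * limit might build a PBC for a given vl and plen (PBC + header +
 * payload length, in dwords) as
 *
 *	pbc = create_pbc(ppd, 0, 0, vl, plen);
 *
 * With srate_mbs == 0 no static rate delay is inserted, HCRC insertion
 * stays disabled (PBC_IHCRC_NONE), and the VL and dword-length fields
 * carry the packet information.
 */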
14581
14582#define SBUS_THERMAL 0x4f
14583#define SBUS_THERM_MONITOR_MODE 0x1
14584
14585#define THERM_FAILURE(dev, ret, reason) \
14586 dd_dev_err((dd), \
14587 "Thermal sensor initialization failed: %s (%d)\n", \
14588 (reason), (ret))
14589
14590/*
Jakub Pawlakcde10af2016-05-12 10:23:35 -070014591 * Initialize the thermal sensor.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014592 *
14593 * After initialization, enable polling of the thermal sensor through the
14594 * SBus interface. For this to work, the SBus Master firmware has to be
14595 * loaded because the HW polling logic uses SBus interrupts, which are
14596 * not supported with the default firmware. Otherwise, no data will be
14597 * returned through
14598 * the ASIC_STS_THERM CSR.
14599 */
14600static int thermal_init(struct hfi1_devdata *dd)
14601{
14602 int ret = 0;
14603
14604 if (dd->icode != ICODE_RTL_SILICON ||
Dean Luicka4536982016-03-05 08:50:11 -080014605 check_chip_resource(dd, CR_THERM_INIT, NULL))
Mike Marciniszyn77241052015-07-30 15:17:43 -040014606 return ret;
14607
Dean Luick576531f2016-03-05 08:50:01 -080014608 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14609 if (ret) {
14610 THERM_FAILURE(dd, ret, "Acquire SBus");
14611 return ret;
14612 }
14613
Mike Marciniszyn77241052015-07-30 15:17:43 -040014614 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014615 /* Disable polling of thermal readings */
14616 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14617 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014618 /* Thermal Sensor Initialization */
14619 /* Step 1: Reset the Thermal SBus Receiver */
14620 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14621 RESET_SBUS_RECEIVER, 0);
14622 if (ret) {
14623 THERM_FAILURE(dd, ret, "Bus Reset");
14624 goto done;
14625 }
14626 /* Step 2: Set Reset bit in Thermal block */
14627 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14628 WRITE_SBUS_RECEIVER, 0x1);
14629 if (ret) {
14630 THERM_FAILURE(dd, ret, "Therm Block Reset");
14631 goto done;
14632 }
14633 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14634 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14635 WRITE_SBUS_RECEIVER, 0x32);
14636 if (ret) {
14637 THERM_FAILURE(dd, ret, "Write Clock Div");
14638 goto done;
14639 }
14640 /* Step 4: Select temperature mode */
14641 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14642 WRITE_SBUS_RECEIVER,
14643 SBUS_THERM_MONITOR_MODE);
14644 if (ret) {
14645 THERM_FAILURE(dd, ret, "Write Mode Sel");
14646 goto done;
14647 }
14648 /* Step 5: De-assert block reset and start conversion */
14649 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14650 WRITE_SBUS_RECEIVER, 0x2);
14651 if (ret) {
14652 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14653 goto done;
14654 }
14655 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14656 msleep(22);
14657
14658 /* Enable polling of thermal readings */
14659 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
Dean Luicka4536982016-03-05 08:50:11 -080014660
14661 /* Set initialized flag */
14662 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14663 if (ret)
14664 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14665
Mike Marciniszyn77241052015-07-30 15:17:43 -040014666done:
Dean Luick576531f2016-03-05 08:50:01 -080014667 release_chip_resource(dd, CR_SBUS);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014668 return ret;
14669}
14670
14671static void handle_temp_err(struct hfi1_devdata *dd)
14672{
14673 struct hfi1_pportdata *ppd = &dd->pport[0];
14674 /*
14675 * Thermal Critical Interrupt
14676 * Put the device into forced freeze mode, take link down to
14677 * offline, and put DC into reset.
14678 */
14679 dd_dev_emerg(dd,
14680 "Critical temperature reached! Forcing device into freeze mode!\n");
14681 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080014682 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014683 /*
14684 * Shut DC down as much and as quickly as possible.
14685 *
14686 * Step 1: Take the link down to OFFLINE. This will cause the
14687 * 8051 to put the Serdes in reset. However, we don't want to
14688 * go through the entire link state machine since we want to
14689 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14690 * but rather an attempt to save the chip.
14691 * Code below is almost the same as quiet_serdes() but avoids
14692 * all the extra work and the sleeps.
14693 */
14694 ppd->driver_link_ready = 0;
14695 ppd->link_enabled = 0;
Harish Chegondibf640092016-03-05 08:49:29 -080014696 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14697 PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014698 /*
14699 * Step 2: Shutdown LCB and 8051
14700 * After shutdown, do not restore DC_CFG_RESET value.
14701 */
14702 dc_shutdown(dd);
14703}