/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

106struct flag_table {
107 u64 flag; /* the flag */
108 char *str; /* description string */
109 u16 extra; /* extra information */
110 u16 unused0;
111 u32 unused1;
112};
113
114/* str must be a string constant */
115#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
116#define FLAG_ENTRY0(str, flag) {flag, str, 0}
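/*
 * Illustrative expansion (this entry appears in cce_err_status_flags below):
 *	FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * becomes
 *	{ CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }
 * i.e. the status bit mask, its printable name, and no extra consequence bits.
 */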
117
118/* Send Error Consequences */
119#define SEC_WRITE_DROPPED 0x1
120#define SEC_PACKET_DROPPED 0x2
121#define SEC_SC_HALTED 0x4 /* per-context only */
122#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
123
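/*
 * A single hardware error may carry several consequences ORed together;
 * the PIO and send-context error tables below use combinations such as
 * SEC_WRITE_DROPPED | SEC_SPC_FREEZE.
 */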
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32
129
130/* Bit offset into the GUID which carries HFI id information */
131#define GUID_HFI_INDEX_SHIFT 39
132
133/* extract the emulation revision */
134#define emulator_rev(dd) ((dd)->irev >> 8)
135/* parallel and serial emulation versions are 3 and 4 respectively */
136#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
137#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
138
139/* RSM fields */
140
141/* packet type */
142#define IB_PACKET_TYPE 2ull
143#define QW_SHIFT 6ull
144/* QPN[7..1] */
145#define QPN_WIDTH 7ull
146
147/* LRH.BTH: QW 0, OFFSET 48 - for match */
148#define LRH_BTH_QW 0ull
149#define LRH_BTH_BIT_OFFSET 48ull
150#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
151#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
152#define LRH_BTH_SELECT
153#define LRH_BTH_MASK 3ull
154#define LRH_BTH_VALUE 2ull
155
156/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
157#define LRH_SC_QW 0ull
158#define LRH_SC_BIT_OFFSET 56ull
159#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
160#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
161#define LRH_SC_MASK 128ull
162#define LRH_SC_VALUE 0ull
163
164/* SC[n..0] QW 0, OFFSET 60 - for select */
165#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
166
167/* QPN[m+n:1] QW 1, OFFSET 1 */
168#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
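/*
 * Note on the encoding used above: each RSM match/select offset packs a
 * quadword index in the bits above QW_SHIFT and a bit offset within that
 * quadword in the low bits, e.g. QPN_SELECT_OFFSET = (1 << 6) | 1 = 0x41.
 */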
169
170/* defines to build power on SC2VL table */
171#define SC2VL_VAL( \
172 num, \
173 sc0, sc0val, \
174 sc1, sc1val, \
175 sc2, sc2val, \
176 sc3, sc3val, \
177 sc4, sc4val, \
178 sc5, sc5val, \
179 sc6, sc6val, \
180 sc7, sc7val) \
181( \
182 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
183 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
184 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
185 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
186 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
187 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
188 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
189 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
190)
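/*
 * Hypothetical example (values chosen for illustration only):
 *	SC2VL_VAL(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 1, 5, 1, 6, 1, 7, 1)
 * ORs each SCn value, shifted by the matching SEND_SC2VLT0_SCn_SHIFT, into
 * one 64-bit register image that maps SC0-SC3 to VL0 and SC4-SC7 to VL1.
 */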
191
192#define DC_SC_VL_VAL( \
193 range, \
194 e0, e0val, \
195 e1, e1val, \
196 e2, e2val, \
197 e3, e3val, \
198 e4, e4val, \
199 e5, e5val, \
200 e6, e6val, \
201 e7, e7val, \
202 e8, e8val, \
203 e9, e9val, \
204 e10, e10val, \
205 e11, e11val, \
206 e12, e12val, \
207 e13, e13val, \
208 e14, e14val, \
209 e15, e15val) \
210( \
211 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
212 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
213 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
214 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
215 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
216 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
217 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
218 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
219 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
220 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
221 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
222 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
223 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
224 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
225 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
226 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
227)
228
229/* all CceStatus sub-block freeze bits */
230#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
231 | CCE_STATUS_RXE_FROZE_SMASK \
232 | CCE_STATUS_TXE_FROZE_SMASK \
233 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
234/* all CceStatus sub-block TXE pause bits */
235#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
236 | CCE_STATUS_TXE_PAUSED_SMASK \
237 | CCE_STATUS_SDMA_PAUSED_SMASK)
238/* all CceStatus sub-block RXE pause bits */
239#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
240
241/*
242 * CCE Error flags.
243 */
244static struct flag_table cce_err_status_flags[] = {
245/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
246 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
247/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
248 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
249/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
250 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
251/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
252 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
253/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
254 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
255/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
256 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
257/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
258 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
259/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
260 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
261/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
262 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
263/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
264 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
266 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
267/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
268 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
269/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
270 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
271/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
272 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
274 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
275/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
276 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
278 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
280 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
281/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
282 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
283/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
284 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
285/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
286 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
287/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
288 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
289/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
290 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
291/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
292 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
293/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
294 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
295/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
296 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
297/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
298 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
299/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
300 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
301/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
302 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
303/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
304 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
305/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
306 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
307/*31*/ FLAG_ENTRY0("LATriggered",
308 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
309/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
310 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
311/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
312 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
313/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
314 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
315/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
316 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
317/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
318 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
319/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
320 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
321/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
322 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
323/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
324 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
325/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
326 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
327/*41-63 reserved*/
328};
329
330/*
331 * Misc Error flags
332 */
333#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
334static struct flag_table misc_err_status_flags[] = {
335/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
336/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
337/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
338/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
339/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
340/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
341/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
342/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
343/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
344/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
345/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
346/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
347/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
348};
349
350/*
351 * TXE PIO Error flags and consequences
352 */
353static struct flag_table pio_err_status_flags[] = {
354/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
355 SEC_WRITE_DROPPED,
356 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
357/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
358 SEC_SPC_FREEZE,
359 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
360/* 2*/ FLAG_ENTRY("PioCsrParity",
361 SEC_SPC_FREEZE,
362 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
363/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
364 SEC_SPC_FREEZE,
365 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
366/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
367 SEC_SPC_FREEZE,
368 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
369/* 5*/ FLAG_ENTRY("PioPccFifoParity",
370 SEC_SPC_FREEZE,
371 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
372/* 6*/ FLAG_ENTRY("PioPecFifoParity",
373 SEC_SPC_FREEZE,
374 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
375/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
376 SEC_SPC_FREEZE,
377 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
378/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
379 SEC_SPC_FREEZE,
380 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
381/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
382 SEC_SPC_FREEZE,
383 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
384/*10*/ FLAG_ENTRY("PioSmPktResetParity",
385 SEC_SPC_FREEZE,
386 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
387/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
388 SEC_SPC_FREEZE,
389 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
390/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
391 SEC_SPC_FREEZE,
392 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
393/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
394 0,
395 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
396/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
397 0,
398 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
399/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
400 SEC_SPC_FREEZE,
401 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
402/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
403 SEC_SPC_FREEZE,
404 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
405/*17*/ FLAG_ENTRY("PioInitSmIn",
406 0,
407 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
408/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
409 SEC_SPC_FREEZE,
410 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
411/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
412 SEC_SPC_FREEZE,
413 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
414/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
415 0,
416 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
417/*21*/ FLAG_ENTRY("PioWriteDataParity",
418 SEC_SPC_FREEZE,
419 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
420/*22*/ FLAG_ENTRY("PioStateMachine",
421 SEC_SPC_FREEZE,
422 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
423/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
426/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
429/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
430 SEC_SPC_FREEZE,
431 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
432/*26*/ FLAG_ENTRY("PioVlfSopParity",
433 SEC_SPC_FREEZE,
434 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
435/*27*/ FLAG_ENTRY("PioVlFifoParity",
436 SEC_SPC_FREEZE,
437 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
438/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
439 SEC_SPC_FREEZE,
440 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
441/*29*/ FLAG_ENTRY("PioPpmcSopLen",
442 SEC_SPC_FREEZE,
443 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
444/*30-31 reserved*/
445/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
446 SEC_SPC_FREEZE,
447 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
448/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
449 SEC_SPC_FREEZE,
450 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
451/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
452 SEC_SPC_FREEZE,
453 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
454/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
455 SEC_SPC_FREEZE,
456 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
457/*36-63 reserved*/
458};
459
460/* TXE PIO errors that cause an SPC freeze */
461#define ALL_PIO_FREEZE_ERR \
462 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
463 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
490 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
491
492/*
493 * TXE SDMA Error flags
494 */
495static struct flag_table sdma_err_status_flags[] = {
496/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
497 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
498/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
499 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
500/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
501 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
502/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
503 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
504/*04-63 reserved*/
505};
506
507/* TXE SDMA errors that cause an SPC freeze */
508#define ALL_SDMA_FREEZE_ERR \
509 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
510 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
511 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
512
/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
514#define PORT_DISCARD_EGRESS_ERRS \
515 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
516 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
517 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
518
/*
520 * TXE Egress Error flags
521 */
522#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
523static struct flag_table egress_err_status_flags[] = {
524/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
525/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
526/* 2 reserved */
527/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
528 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
529/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
530/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
531/* 6 reserved */
532/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
533 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
534/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
535 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
536/* 9-10 reserved */
537/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
538 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
539/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
540/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
541/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
542/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
543/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
544 SEES(TX_SDMA0_DISALLOWED_PACKET)),
545/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
546 SEES(TX_SDMA1_DISALLOWED_PACKET)),
547/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
548 SEES(TX_SDMA2_DISALLOWED_PACKET)),
549/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
550 SEES(TX_SDMA3_DISALLOWED_PACKET)),
551/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
552 SEES(TX_SDMA4_DISALLOWED_PACKET)),
553/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
554 SEES(TX_SDMA5_DISALLOWED_PACKET)),
555/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
556 SEES(TX_SDMA6_DISALLOWED_PACKET)),
557/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
558 SEES(TX_SDMA7_DISALLOWED_PACKET)),
559/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
560 SEES(TX_SDMA8_DISALLOWED_PACKET)),
561/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
562 SEES(TX_SDMA9_DISALLOWED_PACKET)),
563/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
564 SEES(TX_SDMA10_DISALLOWED_PACKET)),
565/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
566 SEES(TX_SDMA11_DISALLOWED_PACKET)),
567/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
568 SEES(TX_SDMA12_DISALLOWED_PACKET)),
569/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
570 SEES(TX_SDMA13_DISALLOWED_PACKET)),
571/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
572 SEES(TX_SDMA14_DISALLOWED_PACKET)),
573/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
574 SEES(TX_SDMA15_DISALLOWED_PACKET)),
575/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
576 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
577/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
578 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
579/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
580 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
581/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
582 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
583/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
584 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
585/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
586 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
587/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
588 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
589/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
590 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
591/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
592 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
593/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
594/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
595/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
596/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
597/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
598/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
599/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
600/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
601/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
602/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
603/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
604/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
605/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
606/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
607/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
608/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
609/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
610/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
611/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
612/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
613/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
614/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
615 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
616/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
617 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
618};
619
620/*
621 * TXE Egress Error Info flags
622 */
623#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
624static struct flag_table egress_err_info_flags[] = {
625/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
626/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
627/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
628/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
629/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
630/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
631/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
632/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
633/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
634/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
635/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
636/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
637/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
638/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
639/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
640/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
641/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
642/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
643/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
644/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
645/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
646/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
647};
648
649/* TXE Egress errors that cause an SPC freeze */
650#define ALL_TXE_EGRESS_FREEZE_ERR \
651 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
652 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
653 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
654 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
655 | SEES(TX_LAUNCH_CSR_PARITY) \
656 | SEES(TX_SBRD_CTL_CSR_PARITY) \
657 | SEES(TX_CONFIG_PARITY) \
658 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
659 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
660 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
661 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
662 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
663 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
664 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
665 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
666 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
667 | SEES(TX_CREDIT_RETURN_PARITY))
668
669/*
670 * TXE Send error flags
671 */
672#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
673static struct flag_table send_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
676/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
677};
678
679/*
680 * TXE Send Context Error flags and consequences
681 */
682static struct flag_table sc_err_status_flags[] = {
683/* 0*/ FLAG_ENTRY("InconsistentSop",
684 SEC_PACKET_DROPPED | SEC_SC_HALTED,
685 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
686/* 1*/ FLAG_ENTRY("DisallowedPacket",
687 SEC_PACKET_DROPPED | SEC_SC_HALTED,
688 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
689/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
690 SEC_WRITE_DROPPED | SEC_SC_HALTED,
691 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
692/* 3*/ FLAG_ENTRY("WriteOverflow",
693 SEC_WRITE_DROPPED | SEC_SC_HALTED,
694 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
695/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
696 SEC_WRITE_DROPPED | SEC_SC_HALTED,
697 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
698/* 5-63 reserved*/
699};
700
701/*
702 * RXE Receive Error flags
703 */
704#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
705static struct flag_table rxe_err_status_flags[] = {
706/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
707/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
708/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
709/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
710/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
711/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
712/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
713/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
714/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
715/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
716/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
717/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
718/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
719/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
720/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
721/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
722/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
723 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
724/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
725/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
726/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
727 RXES(RBUF_BLOCK_LIST_READ_UNC)),
728/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
729 RXES(RBUF_BLOCK_LIST_READ_COR)),
730/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
731 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
732/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
733 RXES(RBUF_CSR_QENT_CNT_PARITY)),
734/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
735 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
736/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
737 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
738/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
739/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
740/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
741 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
742/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
743/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
744/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
745/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
746/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
747/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
748/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
749/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
750 RXES(RBUF_FL_INITDONE_PARITY)),
751/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
752 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
753/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
754/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
755/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
756/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
757 RXES(LOOKUP_DES_PART1_UNC_COR)),
758/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
759 RXES(LOOKUP_DES_PART2_PARITY)),
760/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
761/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
762/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
763/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
764/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
765/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
766/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
767/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
768/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
769/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
770/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
771/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
772/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
773/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
774/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
775/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
776/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
777/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
778/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
779/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
780/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
781/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
782};
783
784/* RXE errors that will trigger an SPC freeze */
785#define ALL_RXE_FREEZE_ERR \
786 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
787 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
824 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
825 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
826 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
827 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
828 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
829 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
830
831#define RXE_FREEZE_ABORT_MASK \
832 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
833 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
834 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
835
836/*
837 * DCC Error Flags
838 */
839#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
840static struct flag_table dcc_err_flags[] = {
841 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
842 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
843 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
844 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
845 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
846 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
847 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
848 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
849 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
850 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
851 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
852 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
853 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
854 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
855 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
856 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
857 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
858 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
859 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
860 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
861 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
862 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
863 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
864 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
865 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
866 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
867 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
868 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
869 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
870 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
871 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
872 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
873 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
874 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
875 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
876 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
877 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
878 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
879 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
880 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
881 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
882 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
883 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
884 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
885 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
886 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
887};
888
889/*
890 * LCB error flags
891 */
892#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
893static struct flag_table lcb_err_flags[] = {
894/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
895/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
896/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
897/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
898 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
899/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
900/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
901/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
902/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
903/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
904/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
905/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
906/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
907/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
908/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
909 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
910/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
911/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
912/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
913/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
914/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
915/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
916 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
917/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
918/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
919/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
920/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
921/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
922/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
923/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
924 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
925/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
926/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
927 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
928/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
929 LCBE(REDUNDANT_FLIT_PARITY_ERR))
930};
931
932/*
933 * DC8051 Error Flags
934 */
935#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
936static struct flag_table dc8051_err_flags[] = {
937 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
938 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
939 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
940 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
941 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
942 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
943 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
944 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
945 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
948};
949
950/*
951 * DC8051 Information Error flags
952 *
953 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
954 */
955static struct flag_table dc8051_info_err_flags[] = {
956 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
957 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
958 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
959 FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
962 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
963 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
964 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
965 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
966 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
967 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT)
};
971
972/*
973 * DC8051 Information Host Information flags
974 *
975 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
976 */
977static struct flag_table dc8051_info_host_msg_flags[] = {
978 FLAG_ENTRY0("Host request done", 0x0001),
979 FLAG_ENTRY0("BC SMA message", 0x0002),
980 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
981 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
982 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
983 FLAG_ENTRY0("External device config request", 0x0020),
984 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
985 FLAG_ENTRY0("LinkUp achieved", 0x0080),
986 FLAG_ENTRY0("Link going down", 0x0100),
987};
988
static u32 encoded_size(u32 size);
990static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
991static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
992static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
993 u8 *continuous);
994static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
995 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
996static void read_vc_remote_link_width(struct hfi1_devdata *dd,
997 u8 *remote_tx_rate, u16 *link_widths);
998static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
999 u8 *flag_bits, u16 *link_widths);
1000static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1001 u8 *device_rev);
1002static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1003static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1004static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1005 u8 *tx_polarity_inversion,
1006 u8 *rx_polarity_inversion, u8 *max_rate);
1007static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1008 unsigned int context, u64 err_status);
1009static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1010static void handle_dcc_err(struct hfi1_devdata *dd,
1011 unsigned int context, u64 err_status);
1012static void handle_lcb_err(struct hfi1_devdata *dd,
1013 unsigned int context, u64 err_status);
1014static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1017static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1018static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1020static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1021static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022static void set_partition_keys(struct hfi1_pportdata *);
1023static const char *link_state_name(u32 state);
1024static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1025 u32 state);
1026static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1027 u64 *out_data);
1028static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1029static int thermal_init(struct hfi1_devdata *dd);
1030
1031static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1032 int msecs);
1033static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);

1042/*
1043 * Error interrupt table entry. This is used as input to the interrupt
1044 * "clear down" routine used for all second tier error interrupt register.
1045 * Second tier interrupt registers have a single bit representing them
1046 * in the top-level CceIntStatus.
1047 */
1048struct err_reg_info {
1049 u32 status; /* status CSR offset */
1050 u32 clear; /* clear CSR offset */
1051 u32 mask; /* mask CSR offset */
1052 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1053 const char *desc;
1054};
1055
1056#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1057#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1058#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1059
1060/*
1061 * Helpers for building HFI and DC error interrupt table entries. Different
1062 * helpers are needed because of inconsistent register names.
1063 */
1064#define EE(reg, handler, desc) \
1065 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1066 handler, desc }
1067#define DC_EE1(reg, handler, desc) \
1068 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1069#define DC_EE2(reg, handler, desc) \
1070 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
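/*
 * Illustrative expansion: the misc_errs[] entry below
 *	EE(CCE_ERR, handle_cce_err, "CceErr")
 * becomes
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" }
 * matching the status/clear/mask CSR triple of struct err_reg_info.
 */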
1071
1072/*
1073 * Table of the "misc" grouping of error interrupts. Each entry refers to
1074 * another register containing more information.
1075 */
1076static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1077/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1078/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1079/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1080/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1081/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1082/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1083/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1084/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1085 /* the rest are reserved */
1086};
1087
1088/*
1089 * Index into the Various section of the interrupt sources
1090 * corresponding to the Critical Temperature interrupt.
1091 */
1092#define TCRIT_INT_SOURCE 4
1093
1094/*
1095 * SDMA error interrupt entry - refers to another register containing more
1096 * information.
1097 */
1098static const struct err_reg_info sdma_eng_err =
1099 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1100
1101static const struct err_reg_info various_err[NUM_VARIOUS] = {
1102/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1103/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1104/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1105/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1106/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1107 /* rest are reserved */
1108};
1109
1110/*
1111 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1112 * register can not be derived from the MTU value because 10K is not
1113 * a power of 2. Therefore, we need a constant. Everything else can
1114 * be calculated.
1115 */
1116#define DCC_CFG_PORT_MTU_CAP_10240 7
1117
1118/*
1119 * Table of the DC grouping of error interrupts. Each entry refers to
1120 * another register containing more information.
1121 */
1122static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1123/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1124/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1125/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1126/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1127 /* the rest are reserved */
1128};
1129
1130struct cntr_entry {
1131 /*
1132 * counter name
1133 */
1134 char *name;
1135
1136 /*
1137 * csr to read for name (if applicable)
1138 */
1139 u64 csr;
1140
1141 /*
1142 * offset into dd or ppd to store the counter's value
1143 */
1144 int offset;
1145
1146 /*
1147 * flags
1148 */
1149 u8 flags;
1150
1151 /*
1152 * accessor for stat element, context either dd or ppd
1153 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};
1157
1158#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1159#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1160
1161#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1162{ \
1163 name, \
1164 csr, \
1165 offset, \
1166 flags, \
1167 accessor \
1168}
1169
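/*
 * Illustrative expansion of one of the wrapper macros below:
 *	RXE32_DEV_CNTR_ELEM(name, counter, flags)
 * builds a cntr_entry of
 *	{ "name", counter * 8 + RCV_COUNTER_ARRAY32, 0,
 *	  flags | CNTR_32BIT, dev_access_u32_csr }
 */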
1170/* 32bit RXE */
1171#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1172CNTR_ELEM(#name, \
1173 (counter * 8 + RCV_COUNTER_ARRAY32), \
1174 0, flags | CNTR_32BIT, \
1175 port_access_u32_csr)
1176
1177#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1178CNTR_ELEM(#name, \
1179 (counter * 8 + RCV_COUNTER_ARRAY32), \
1180 0, flags | CNTR_32BIT, \
1181 dev_access_u32_csr)
1182
1183/* 64bit RXE */
1184#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1185CNTR_ELEM(#name, \
1186 (counter * 8 + RCV_COUNTER_ARRAY64), \
1187 0, flags, \
1188 port_access_u64_csr)
1189
1190#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1191CNTR_ELEM(#name, \
1192 (counter * 8 + RCV_COUNTER_ARRAY64), \
1193 0, flags, \
1194 dev_access_u64_csr)
1195
1196#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1197#define OVR_ELM(ctx) \
1198CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)
1201
1202/* 32bit TXE */
1203#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1204CNTR_ELEM(#name, \
1205 (counter * 8 + SEND_COUNTER_ARRAY32), \
1206 0, flags | CNTR_32BIT, \
1207 port_access_u32_csr)
1208
1209/* 64bit TXE */
1210#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1211CNTR_ELEM(#name, \
1212 (counter * 8 + SEND_COUNTER_ARRAY64), \
1213 0, flags, \
1214 port_access_u64_csr)
1215
1216# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1217CNTR_ELEM(#name,\
1218 counter * 8 + SEND_COUNTER_ARRAY64, \
1219 0, \
1220 flags, \
1221 dev_access_u64_csr)
1222
1223/* CCE */
1224#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1225CNTR_ELEM(#name, \
1226 (counter * 8 + CCE_COUNTER_ARRAY32), \
1227 0, flags | CNTR_32BIT, \
1228 dev_access_u32_csr)
1229
1230#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1231CNTR_ELEM(#name, \
1232 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1233 0, flags | CNTR_32BIT, \
1234 dev_access_u32_csr)
1235
1236/* DC */
1237#define DC_PERF_CNTR(name, counter, flags) \
1238CNTR_ELEM(#name, \
1239 counter, \
1240 0, \
1241 flags, \
1242 dev_access_u64_csr)
1243
1244#define DC_PERF_CNTR_LCB(name, counter, flags) \
1245CNTR_ELEM(#name, \
1246 counter, \
1247 0, \
1248 flags, \
1249 dc_access_lcb_cntr)
1250
1251/* ibp counters */
1252#define SW_IBP_CNTR(name, cntr) \
1253CNTR_ELEM(#name, \
1254 0, \
1255 0, \
1256 CNTR_SYNTH, \
1257 access_ibp_##cntr)
1258
1259u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1260{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001261 if (dd->flags & HFI1_PRESENT) {
Bhaktipriya Shridhar6d210ee2016-02-25 17:22:11 +05301262 return readq((void __iomem *)dd->kregbase + offset);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001263 }
1264 return -1;
1265}
1266
1267void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1268{
1269 if (dd->flags & HFI1_PRESENT)
1270 writeq(value, (void __iomem *)dd->kregbase + offset);
1271}
1272
1273void __iomem *get_csr_addr(
1274 struct hfi1_devdata *dd,
1275 u32 offset)
1276{
1277 return (void __iomem *)dd->kregbase + offset;
1278}
1279
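/*
 * Illustrative sketch of the raw CSR helpers above, using a hypothetical
 * register offset EXAMPLE_CSR: read the current value, set the low bit,
 * and write it back.  Both helpers check HFI1_PRESENT first, so once the
 * device is no longer marked present write_csr() does nothing and
 * read_csr() returns all ones (-1 cast to u64).
 */
#if 0
static void example_set_low_bit(struct hfi1_devdata *dd)
{
	u64 reg = read_csr(dd, EXAMPLE_CSR);	/* EXAMPLE_CSR is made up */

	write_csr(dd, EXAMPLE_CSR, reg | 1ull);
}
#endif
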
1280static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1281 int mode, u64 value)
1282{
1283 u64 ret;
1284
Mike Marciniszyn77241052015-07-30 15:17:43 -04001285 if (mode == CNTR_MODE_R) {
1286 ret = read_csr(dd, csr);
1287 } else if (mode == CNTR_MODE_W) {
1288 write_csr(dd, csr, value);
1289 ret = value;
1290 } else {
1291 dd_dev_err(dd, "Invalid cntr register access mode");
1292 return 0;
1293 }
1294
1295 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1296 return ret;
1297}
1298
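/*
 * Illustrative sketch: the counter accessors below all funnel through
 * read_write_csr().  CNTR_MODE_R reads the CSR and ignores "data";
 * CNTR_MODE_W writes "data" and echoes it back; anything else logs an
 * error and returns 0.
 */
#if 0
static u64 example_read_then_clear(const struct hfi1_devdata *dd, u32 csr)
{
	u64 cur = read_write_csr(dd, csr, CNTR_MODE_R, 0);	/* read */

	read_write_csr(dd, csr, CNTR_MODE_W, 0);	/* then write a zero */
	return cur;
}
#endif
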
1299/* Dev Access */
1300static u64 dev_access_u32_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001301 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001302{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301303 struct hfi1_devdata *dd = context;
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001304 u64 csr = entry->csr;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001305
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001306 if (entry->flags & CNTR_SDMA) {
1307 if (vl == CNTR_INVALID_VL)
1308 return 0;
1309 csr += 0x100 * vl;
1310 } else {
1311 if (vl != CNTR_INVALID_VL)
1312 return 0;
1313 }
1314 return read_write_csr(dd, csr, mode, data);
1315}
1316
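/*
 * Note on dev_access_u32_csr(): for entries flagged CNTR_SDMA the "vl"
 * argument is reinterpreted as an SDMA engine index and consecutive
 * engines' counter CSRs are 0x100 bytes apart, so engine N is read from
 * entry->csr + N * 0x100.  Entries without CNTR_SDMA only respond when
 * vl == CNTR_INVALID_VL.
 */
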
1317static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1318 void *context, int idx, int mode, u64 data)
1319{
1320 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1321
1322 if (dd->per_sdma && idx < dd->num_sdma)
1323 return dd->per_sdma[idx].err_cnt;
1324 return 0;
1325}
1326
1327static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1328 void *context, int idx, int mode, u64 data)
1329{
1330 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1331
1332 if (dd->per_sdma && idx < dd->num_sdma)
1333 return dd->per_sdma[idx].sdma_int_cnt;
1334 return 0;
1335}
1336
1337static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1338 void *context, int idx, int mode, u64 data)
1339{
1340 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1341
1342 if (dd->per_sdma && idx < dd->num_sdma)
1343 return dd->per_sdma[idx].idle_int_cnt;
1344 return 0;
1345}
1346
1347static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1348 void *context, int idx, int mode,
1349 u64 data)
1350{
1351 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1352
1353 if (dd->per_sdma && idx < dd->num_sdma)
1354 return dd->per_sdma[idx].progress_int_cnt;
1355 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001356}
1357
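/*
 * The four access_sde_* helpers above report software counters kept in
 * dd->per_sdma[] rather than CSRs.  Like the CNTR_SDMA case, they reuse
 * the "vl" slot as an SDMA engine index ("idx") and return 0 for an
 * out-of-range engine or before per_sdma has been allocated.
 */
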
1358static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001359 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001360{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301361 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001362
1363 u64 val = 0;
1364 u64 csr = entry->csr;
1365
1366 if (entry->flags & CNTR_VL) {
1367 if (vl == CNTR_INVALID_VL)
1368 return 0;
1369 csr += 8 * vl;
1370 } else {
1371 if (vl != CNTR_INVALID_VL)
1372 return 0;
1373 }
1374
1375 val = read_write_csr(dd, csr, mode, data);
1376 return val;
1377}
1378
1379static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001380 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001381{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301382 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001383 u32 csr = entry->csr;
1384 int ret = 0;
1385
1386 if (vl != CNTR_INVALID_VL)
1387 return 0;
1388 if (mode == CNTR_MODE_R)
1389 ret = read_lcb_csr(dd, csr, &data);
1390 else if (mode == CNTR_MODE_W)
1391 ret = write_lcb_csr(dd, csr, data);
1392
1393 if (ret) {
1394 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1395 return 0;
1396 }
1397
1398 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1399 return data;
1400}
1401
1402/* Port Access */
1403static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001404 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001405{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301406 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001407
1408 if (vl != CNTR_INVALID_VL)
1409 return 0;
1410 return read_write_csr(ppd->dd, entry->csr, mode, data);
1411}
1412
1413static u64 port_access_u64_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001414 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001415{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301416 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001417 u64 val;
1418 u64 csr = entry->csr;
1419
1420 if (entry->flags & CNTR_VL) {
1421 if (vl == CNTR_INVALID_VL)
1422 return 0;
1423 csr += 8 * vl;
1424 } else {
1425 if (vl != CNTR_INVALID_VL)
1426 return 0;
1427 }
1428 val = read_write_csr(ppd->dd, csr, mode, data);
1429 return val;
1430}
1431
1432/* Software defined */
1433static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1434 u64 data)
1435{
1436 u64 ret;
1437
1438 if (mode == CNTR_MODE_R) {
1439 ret = *cntr;
1440 } else if (mode == CNTR_MODE_W) {
1441 *cntr = data;
1442 ret = data;
1443 } else {
1444 dd_dev_err(dd, "Invalid cntr sw access mode");
1445 return 0;
1446 }
1447
1448 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1449
1450 return ret;
1451}
1452
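/*
 * Illustrative sketch of read_write_sw(): the software counters are plain
 * u64 fields, so CNTR_MODE_R returns the current value and CNTR_MODE_W
 * overwrites it (typically with 0 to clear it).  The example below clears
 * the existing link_downed port counter and returns its old value.
 */
#if 0
static u64 example_clear_link_downed(struct hfi1_pportdata *ppd)
{
	u64 old = read_write_sw(ppd->dd, &ppd->link_downed, CNTR_MODE_R, 0);

	read_write_sw(ppd->dd, &ppd->link_downed, CNTR_MODE_W, 0);
	return old;
}
#endif
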
1453static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001454 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001455{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301456 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001457
1458 if (vl != CNTR_INVALID_VL)
1459 return 0;
1460 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1461}
1462
1463static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001464 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001465{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301466 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001467
1468 if (vl != CNTR_INVALID_VL)
1469 return 0;
1470 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1471}
1472
Dean Luick6d014532015-12-01 15:38:23 -05001473static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1474 void *context, int vl, int mode,
1475 u64 data)
1476{
1477 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1478
1479 if (vl != CNTR_INVALID_VL)
1480 return 0;
1481 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1482}
1483
Mike Marciniszyn77241052015-07-30 15:17:43 -04001484static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001485 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001486{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001487 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1488 u64 zero = 0;
1489 u64 *counter;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001490
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001491 if (vl == CNTR_INVALID_VL)
1492 counter = &ppd->port_xmit_discards;
1493 else if (vl >= 0 && vl < C_VL_COUNT)
1494 counter = &ppd->port_xmit_discards_vl[vl];
1495 else
1496 counter = &zero;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001497
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001498 return read_write_sw(ppd->dd, counter, mode, data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001499}
1500
1501static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001502 void *context, int vl, int mode,
1503 u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001504{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301505 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001506
1507 if (vl != CNTR_INVALID_VL)
1508 return 0;
1509
1510 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1511 mode, data);
1512}
1513
1514static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001515 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001516{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301517 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001518
1519 if (vl != CNTR_INVALID_VL)
1520 return 0;
1521
1522 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1523 mode, data);
1524}
1525
1526u64 get_all_cpu_total(u64 __percpu *cntr)
1527{
1528 int cpu;
1529 u64 counter = 0;
1530
1531 for_each_possible_cpu(cpu)
1532 counter += *per_cpu_ptr(cntr, cpu);
1533 return counter;
1534}
1535
1536static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1537 u64 __percpu *cntr,
1538 int vl, int mode, u64 data)
1539{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001540 u64 ret = 0;
1541
1542 if (vl != CNTR_INVALID_VL)
1543 return 0;
1544
1545 if (mode == CNTR_MODE_R) {
1546 ret = get_all_cpu_total(cntr) - *z_val;
1547 } else if (mode == CNTR_MODE_W) {
1548 /* A write can only zero the counter */
1549 if (data == 0)
1550 *z_val = get_all_cpu_total(cntr);
1551 else
1552 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1553 } else {
1554 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1555 return 0;
1556 }
1557
1558 return ret;
1559}
1560
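/*
 * Worked example for the per-CPU counters: if the sum over all possible
 * CPUs is 1000 and the baseline *z_val is 400, a CNTR_MODE_R read reports
 * 600.  A write of 0 does not touch the per-CPU data; it snapshots the
 * current total into *z_val so the next read starts from 0 again.  Any
 * non-zero write is rejected, since per-CPU counters can only be zeroed.
 */
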
1561static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1562 void *context, int vl, int mode, u64 data)
1563{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301564 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001565
1566 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1567 mode, data);
1568}
1569
1570static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001571 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001572{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301573 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001574
1575 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1576 mode, data);
1577}
1578
1579static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1580 void *context, int vl, int mode, u64 data)
1581{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301582 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001583
1584 return dd->verbs_dev.n_piowait;
1585}
1586
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001587static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1588 void *context, int vl, int mode, u64 data)
1589{
1590 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1591
1592 return dd->verbs_dev.n_piodrain;
1593}
1594
Mike Marciniszyn77241052015-07-30 15:17:43 -04001595static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1596 void *context, int vl, int mode, u64 data)
1597{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301598 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001599
1600 return dd->verbs_dev.n_txwait;
1601}
1602
1603static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1604 void *context, int vl, int mode, u64 data)
1605{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301606 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001607
1608 return dd->verbs_dev.n_kmem_wait;
1609}
1610
Dean Luickb4219222015-10-26 10:28:35 -04001611static u64 access_sw_send_schedule(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001612 void *context, int vl, int mode, u64 data)
Dean Luickb4219222015-10-26 10:28:35 -04001613{
1614 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1615
Vennila Megavannan89abfc82016-02-03 14:34:07 -08001616 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1617 mode, data);
Dean Luickb4219222015-10-26 10:28:35 -04001618}
1619
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05001620/* Software counters for the error status bits within MISC_ERR_STATUS */
1621static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1622 void *context, int vl, int mode,
1623 u64 data)
1624{
1625 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1626
1627 return dd->misc_err_status_cnt[12];
1628}
1629
1630static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1631 void *context, int vl, int mode,
1632 u64 data)
1633{
1634 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1635
1636 return dd->misc_err_status_cnt[11];
1637}
1638
1639static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1640 void *context, int vl, int mode,
1641 u64 data)
1642{
1643 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1644
1645 return dd->misc_err_status_cnt[10];
1646}
1647
1648static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1649 void *context, int vl,
1650 int mode, u64 data)
1651{
1652 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1653
1654 return dd->misc_err_status_cnt[9];
1655}
1656
1657static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1658 void *context, int vl, int mode,
1659 u64 data)
1660{
1661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1662
1663 return dd->misc_err_status_cnt[8];
1664}
1665
1666static u64 access_misc_efuse_read_bad_addr_err_cnt(
1667 const struct cntr_entry *entry,
1668 void *context, int vl, int mode, u64 data)
1669{
1670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1671
1672 return dd->misc_err_status_cnt[7];
1673}
1674
1675static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1676 void *context, int vl,
1677 int mode, u64 data)
1678{
1679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1680
1681 return dd->misc_err_status_cnt[6];
1682}
1683
1684static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1685 void *context, int vl, int mode,
1686 u64 data)
1687{
1688 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1689
1690 return dd->misc_err_status_cnt[5];
1691}
1692
1693static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1694 void *context, int vl, int mode,
1695 u64 data)
1696{
1697 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1698
1699 return dd->misc_err_status_cnt[4];
1700}
1701
1702static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1703 void *context, int vl,
1704 int mode, u64 data)
1705{
1706 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1707
1708 return dd->misc_err_status_cnt[3];
1709}
1710
1711static u64 access_misc_csr_write_bad_addr_err_cnt(
1712 const struct cntr_entry *entry,
1713 void *context, int vl, int mode, u64 data)
1714{
1715 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1716
1717 return dd->misc_err_status_cnt[2];
1718}
1719
1720static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1721 void *context, int vl,
1722 int mode, u64 data)
1723{
1724 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1725
1726 return dd->misc_err_status_cnt[1];
1727}
1728
1729static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1730 void *context, int vl, int mode,
1731 u64 data)
1732{
1733 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1734
1735 return dd->misc_err_status_cnt[0];
1736}
1737
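/*
 * Illustrative sketch: each access_misc_*_err_cnt helper above simply
 * reports one element of dd->misc_err_status_cnt[], which the MISC error
 * handling code elsewhere in this file is expected to bump when the
 * corresponding MISC_ERR_STATUS bit fires.  A hypothetical counter table
 * entry wiring one of them up could look like this.
 */
#if 0
static const struct cntr_entry example_misc_err =
	CNTR_ELEM("MiscPllLockFailErr", 0, 0, CNTR_NORMAL,
		  access_misc_pll_lock_fail_err_cnt);
#endif
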
1738/*
1739 * Software counter for the aggregate of
1740 * individual CceErrStatus counters
1741 */
1742static u64 access_sw_cce_err_status_aggregated_cnt(
1743 const struct cntr_entry *entry,
1744 void *context, int vl, int mode, u64 data)
1745{
1746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1747
1748 return dd->sw_cce_err_status_aggregate;
1749}
1750
1751/*
1752 * Software counters corresponding to each of the
1753 * error status bits within CceErrStatus
1754 */
1755static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1756 void *context, int vl, int mode,
1757 u64 data)
1758{
1759 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1760
1761 return dd->cce_err_status_cnt[40];
1762}
1763
1764static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1765 void *context, int vl, int mode,
1766 u64 data)
1767{
1768 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1769
1770 return dd->cce_err_status_cnt[39];
1771}
1772
1773static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1774 void *context, int vl, int mode,
1775 u64 data)
1776{
1777 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1778
1779 return dd->cce_err_status_cnt[38];
1780}
1781
1782static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1783 void *context, int vl, int mode,
1784 u64 data)
1785{
1786 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1787
1788 return dd->cce_err_status_cnt[37];
1789}
1790
1791static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1792 void *context, int vl, int mode,
1793 u64 data)
1794{
1795 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1796
1797 return dd->cce_err_status_cnt[36];
1798}
1799
1800static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1801 const struct cntr_entry *entry,
1802 void *context, int vl, int mode, u64 data)
1803{
1804 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1805
1806 return dd->cce_err_status_cnt[35];
1807}
1808
1809static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1810 const struct cntr_entry *entry,
1811 void *context, int vl, int mode, u64 data)
1812{
1813 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1814
1815 return dd->cce_err_status_cnt[34];
1816}
1817
1818static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1819 void *context, int vl,
1820 int mode, u64 data)
1821{
1822 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1823
1824 return dd->cce_err_status_cnt[33];
1825}
1826
1827static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1828 void *context, int vl, int mode,
1829 u64 data)
1830{
1831 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1832
1833 return dd->cce_err_status_cnt[32];
1834}
1835
1836static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1837 void *context, int vl, int mode, u64 data)
1838{
1839 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1840
1841 return dd->cce_err_status_cnt[31];
1842}
1843
1844static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1845 void *context, int vl, int mode,
1846 u64 data)
1847{
1848 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1849
1850 return dd->cce_err_status_cnt[30];
1851}
1852
1853static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1854 void *context, int vl, int mode,
1855 u64 data)
1856{
1857 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1858
1859 return dd->cce_err_status_cnt[29];
1860}
1861
1862static u64 access_pcic_transmit_back_parity_err_cnt(
1863 const struct cntr_entry *entry,
1864 void *context, int vl, int mode, u64 data)
1865{
1866 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1867
1868 return dd->cce_err_status_cnt[28];
1869}
1870
1871static u64 access_pcic_transmit_front_parity_err_cnt(
1872 const struct cntr_entry *entry,
1873 void *context, int vl, int mode, u64 data)
1874{
1875 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1876
1877 return dd->cce_err_status_cnt[27];
1878}
1879
1880static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1881 void *context, int vl, int mode,
1882 u64 data)
1883{
1884 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1885
1886 return dd->cce_err_status_cnt[26];
1887}
1888
1889static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1890 void *context, int vl, int mode,
1891 u64 data)
1892{
1893 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1894
1895 return dd->cce_err_status_cnt[25];
1896}
1897
1898static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1899 void *context, int vl, int mode,
1900 u64 data)
1901{
1902 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1903
1904 return dd->cce_err_status_cnt[24];
1905}
1906
1907static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1908 void *context, int vl, int mode,
1909 u64 data)
1910{
1911 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1912
1913 return dd->cce_err_status_cnt[23];
1914}
1915
1916static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1917 void *context, int vl,
1918 int mode, u64 data)
1919{
1920 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1921
1922 return dd->cce_err_status_cnt[22];
1923}
1924
1925static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1926 void *context, int vl, int mode,
1927 u64 data)
1928{
1929 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1930
1931 return dd->cce_err_status_cnt[21];
1932}
1933
1934static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1935 const struct cntr_entry *entry,
1936 void *context, int vl, int mode, u64 data)
1937{
1938 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1939
1940 return dd->cce_err_status_cnt[20];
1941}
1942
1943static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1944 void *context, int vl,
1945 int mode, u64 data)
1946{
1947 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1948
1949 return dd->cce_err_status_cnt[19];
1950}
1951
1952static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1953 void *context, int vl, int mode,
1954 u64 data)
1955{
1956 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1957
1958 return dd->cce_err_status_cnt[18];
1959}
1960
1961static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1962 void *context, int vl, int mode,
1963 u64 data)
1964{
1965 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1966
1967 return dd->cce_err_status_cnt[17];
1968}
1969
1970static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1971 void *context, int vl, int mode,
1972 u64 data)
1973{
1974 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1975
1976 return dd->cce_err_status_cnt[16];
1977}
1978
1979static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1980 void *context, int vl, int mode,
1981 u64 data)
1982{
1983 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1984
1985 return dd->cce_err_status_cnt[15];
1986}
1987
1988static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1989 void *context, int vl,
1990 int mode, u64 data)
1991{
1992 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1993
1994 return dd->cce_err_status_cnt[14];
1995}
1996
1997static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1998 void *context, int vl, int mode,
1999 u64 data)
2000{
2001 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2002
2003 return dd->cce_err_status_cnt[13];
2004}
2005
2006static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2007 const struct cntr_entry *entry,
2008 void *context, int vl, int mode, u64 data)
2009{
2010 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2011
2012 return dd->cce_err_status_cnt[12];
2013}
2014
2015static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2016 const struct cntr_entry *entry,
2017 void *context, int vl, int mode, u64 data)
2018{
2019 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2020
2021 return dd->cce_err_status_cnt[11];
2022}
2023
2024static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2025 const struct cntr_entry *entry,
2026 void *context, int vl, int mode, u64 data)
2027{
2028 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2029
2030 return dd->cce_err_status_cnt[10];
2031}
2032
2033static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2034 const struct cntr_entry *entry,
2035 void *context, int vl, int mode, u64 data)
2036{
2037 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2038
2039 return dd->cce_err_status_cnt[9];
2040}
2041
2042static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2043 const struct cntr_entry *entry,
2044 void *context, int vl, int mode, u64 data)
2045{
2046 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2047
2048 return dd->cce_err_status_cnt[8];
2049}
2050
2051static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2052 void *context, int vl,
2053 int mode, u64 data)
2054{
2055 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2056
2057 return dd->cce_err_status_cnt[7];
2058}
2059
2060static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2061 const struct cntr_entry *entry,
2062 void *context, int vl, int mode, u64 data)
2063{
2064 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2065
2066 return dd->cce_err_status_cnt[6];
2067}
2068
2069static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2070 void *context, int vl, int mode,
2071 u64 data)
2072{
2073 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2074
2075 return dd->cce_err_status_cnt[5];
2076}
2077
2078static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2079 void *context, int vl, int mode,
2080 u64 data)
2081{
2082 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2083
2084 return dd->cce_err_status_cnt[4];
2085}
2086
2087static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2088 const struct cntr_entry *entry,
2089 void *context, int vl, int mode, u64 data)
2090{
2091 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2092
2093 return dd->cce_err_status_cnt[3];
2094}
2095
2096static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2097 void *context, int vl,
2098 int mode, u64 data)
2099{
2100 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2101
2102 return dd->cce_err_status_cnt[2];
2103}
2104
2105static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2106 void *context, int vl,
2107 int mode, u64 data)
2108{
2109 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2110
2111 return dd->cce_err_status_cnt[1];
2112}
2113
2114static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2115 void *context, int vl, int mode,
2116 u64 data)
2117{
2118 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2119
2120 return dd->cce_err_status_cnt[0];
2121}
2122
2123/*
2124 * Software counters corresponding to each of the
2125 * error status bits within RcvErrStatus
2126 */
2127static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2128 void *context, int vl, int mode,
2129 u64 data)
2130{
2131 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2132
2133 return dd->rcv_err_status_cnt[63];
2134}
2135
2136static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2137 void *context, int vl,
2138 int mode, u64 data)
2139{
2140 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2141
2142 return dd->rcv_err_status_cnt[62];
2143}
2144
2145static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2146 void *context, int vl, int mode,
2147 u64 data)
2148{
2149 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2150
2151 return dd->rcv_err_status_cnt[61];
2152}
2153
2154static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2155 void *context, int vl, int mode,
2156 u64 data)
2157{
2158 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2159
2160 return dd->rcv_err_status_cnt[60];
2161}
2162
2163static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2164 void *context, int vl,
2165 int mode, u64 data)
2166{
2167 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2168
2169 return dd->rcv_err_status_cnt[59];
2170}
2171
2172static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2173 void *context, int vl,
2174 int mode, u64 data)
2175{
2176 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2177
2178 return dd->rcv_err_status_cnt[58];
2179}
2180
2181static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2182 void *context, int vl, int mode,
2183 u64 data)
2184{
2185 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2186
2187 return dd->rcv_err_status_cnt[57];
2188}
2189
2190static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2191 void *context, int vl, int mode,
2192 u64 data)
2193{
2194 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2195
2196 return dd->rcv_err_status_cnt[56];
2197}
2198
2199static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2200 void *context, int vl, int mode,
2201 u64 data)
2202{
2203 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2204
2205 return dd->rcv_err_status_cnt[55];
2206}
2207
2208static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2209 const struct cntr_entry *entry,
2210 void *context, int vl, int mode, u64 data)
2211{
2212 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2213
2214 return dd->rcv_err_status_cnt[54];
2215}
2216
2217static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2218 const struct cntr_entry *entry,
2219 void *context, int vl, int mode, u64 data)
2220{
2221 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2222
2223 return dd->rcv_err_status_cnt[53];
2224}
2225
2226static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2227 void *context, int vl,
2228 int mode, u64 data)
2229{
2230 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2231
2232 return dd->rcv_err_status_cnt[52];
2233}
2234
2235static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2236 void *context, int vl,
2237 int mode, u64 data)
2238{
2239 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2240
2241 return dd->rcv_err_status_cnt[51];
2242}
2243
2244static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2245 void *context, int vl,
2246 int mode, u64 data)
2247{
2248 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2249
2250 return dd->rcv_err_status_cnt[50];
2251}
2252
2253static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2254 void *context, int vl,
2255 int mode, u64 data)
2256{
2257 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2258
2259 return dd->rcv_err_status_cnt[49];
2260}
2261
2262static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2263 void *context, int vl,
2264 int mode, u64 data)
2265{
2266 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2267
2268 return dd->rcv_err_status_cnt[48];
2269}
2270
2271static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2272 void *context, int vl,
2273 int mode, u64 data)
2274{
2275 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2276
2277 return dd->rcv_err_status_cnt[47];
2278}
2279
2280static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2281 void *context, int vl, int mode,
2282 u64 data)
2283{
2284 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2285
2286 return dd->rcv_err_status_cnt[46];
2287}
2288
2289static u64 access_rx_hq_intr_csr_parity_err_cnt(
2290 const struct cntr_entry *entry,
2291 void *context, int vl, int mode, u64 data)
2292{
2293 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2294
2295 return dd->rcv_err_status_cnt[45];
2296}
2297
2298static u64 access_rx_lookup_csr_parity_err_cnt(
2299 const struct cntr_entry *entry,
2300 void *context, int vl, int mode, u64 data)
2301{
2302 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2303
2304 return dd->rcv_err_status_cnt[44];
2305}
2306
2307static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2308 const struct cntr_entry *entry,
2309 void *context, int vl, int mode, u64 data)
2310{
2311 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2312
2313 return dd->rcv_err_status_cnt[43];
2314}
2315
2316static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2317 const struct cntr_entry *entry,
2318 void *context, int vl, int mode, u64 data)
2319{
2320 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2321
2322 return dd->rcv_err_status_cnt[42];
2323}
2324
2325static u64 access_rx_lookup_des_part2_parity_err_cnt(
2326 const struct cntr_entry *entry,
2327 void *context, int vl, int mode, u64 data)
2328{
2329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2330
2331 return dd->rcv_err_status_cnt[41];
2332}
2333
2334static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2335 const struct cntr_entry *entry,
2336 void *context, int vl, int mode, u64 data)
2337{
2338 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2339
2340 return dd->rcv_err_status_cnt[40];
2341}
2342
2343static u64 access_rx_lookup_des_part1_unc_err_cnt(
2344 const struct cntr_entry *entry,
2345 void *context, int vl, int mode, u64 data)
2346{
2347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2348
2349 return dd->rcv_err_status_cnt[39];
2350}
2351
2352static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2353 const struct cntr_entry *entry,
2354 void *context, int vl, int mode, u64 data)
2355{
2356 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2357
2358 return dd->rcv_err_status_cnt[38];
2359}
2360
2361static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2362 const struct cntr_entry *entry,
2363 void *context, int vl, int mode, u64 data)
2364{
2365 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2366
2367 return dd->rcv_err_status_cnt[37];
2368}
2369
2370static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2371 const struct cntr_entry *entry,
2372 void *context, int vl, int mode, u64 data)
2373{
2374 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2375
2376 return dd->rcv_err_status_cnt[36];
2377}
2378
2379static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2380 const struct cntr_entry *entry,
2381 void *context, int vl, int mode, u64 data)
2382{
2383 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2384
2385 return dd->rcv_err_status_cnt[35];
2386}
2387
2388static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2389 const struct cntr_entry *entry,
2390 void *context, int vl, int mode, u64 data)
2391{
2392 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2393
2394 return dd->rcv_err_status_cnt[34];
2395}
2396
2397static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2398 const struct cntr_entry *entry,
2399 void *context, int vl, int mode, u64 data)
2400{
2401 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2402
2403 return dd->rcv_err_status_cnt[33];
2404}
2405
2406static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2407 void *context, int vl, int mode,
2408 u64 data)
2409{
2410 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2411
2412 return dd->rcv_err_status_cnt[32];
2413}
2414
2415static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2416 void *context, int vl, int mode,
2417 u64 data)
2418{
2419 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2420
2421 return dd->rcv_err_status_cnt[31];
2422}
2423
2424static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2425 void *context, int vl, int mode,
2426 u64 data)
2427{
2428 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2429
2430 return dd->rcv_err_status_cnt[30];
2431}
2432
2433static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2434 void *context, int vl, int mode,
2435 u64 data)
2436{
2437 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2438
2439 return dd->rcv_err_status_cnt[29];
2440}
2441
2442static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2443 void *context, int vl,
2444 int mode, u64 data)
2445{
2446 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2447
2448 return dd->rcv_err_status_cnt[28];
2449}
2450
2451static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2452 const struct cntr_entry *entry,
2453 void *context, int vl, int mode, u64 data)
2454{
2455 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2456
2457 return dd->rcv_err_status_cnt[27];
2458}
2459
2460static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2461 const struct cntr_entry *entry,
2462 void *context, int vl, int mode, u64 data)
2463{
2464 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2465
2466 return dd->rcv_err_status_cnt[26];
2467}
2468
2469static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2470 const struct cntr_entry *entry,
2471 void *context, int vl, int mode, u64 data)
2472{
2473 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2474
2475 return dd->rcv_err_status_cnt[25];
2476}
2477
2478static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2479 const struct cntr_entry *entry,
2480 void *context, int vl, int mode, u64 data)
2481{
2482 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2483
2484 return dd->rcv_err_status_cnt[24];
2485}
2486
2487static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2488 const struct cntr_entry *entry,
2489 void *context, int vl, int mode, u64 data)
2490{
2491 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2492
2493 return dd->rcv_err_status_cnt[23];
2494}
2495
2496static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2497 const struct cntr_entry *entry,
2498 void *context, int vl, int mode, u64 data)
2499{
2500 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2501
2502 return dd->rcv_err_status_cnt[22];
2503}
2504
2505static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2506 const struct cntr_entry *entry,
2507 void *context, int vl, int mode, u64 data)
2508{
2509 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2510
2511 return dd->rcv_err_status_cnt[21];
2512}
2513
2514static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2515 const struct cntr_entry *entry,
2516 void *context, int vl, int mode, u64 data)
2517{
2518 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2519
2520 return dd->rcv_err_status_cnt[20];
2521}
2522
2523static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2524 const struct cntr_entry *entry,
2525 void *context, int vl, int mode, u64 data)
2526{
2527 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2528
2529 return dd->rcv_err_status_cnt[19];
2530}
2531
2532static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2533 void *context, int vl,
2534 int mode, u64 data)
2535{
2536 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2537
2538 return dd->rcv_err_status_cnt[18];
2539}
2540
2541static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2542 void *context, int vl,
2543 int mode, u64 data)
2544{
2545 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2546
2547 return dd->rcv_err_status_cnt[17];
2548}
2549
2550static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2551 const struct cntr_entry *entry,
2552 void *context, int vl, int mode, u64 data)
2553{
2554 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2555
2556 return dd->rcv_err_status_cnt[16];
2557}
2558
2559static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2560 const struct cntr_entry *entry,
2561 void *context, int vl, int mode, u64 data)
2562{
2563 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2564
2565 return dd->rcv_err_status_cnt[15];
2566}
2567
2568static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2569 void *context, int vl,
2570 int mode, u64 data)
2571{
2572 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2573
2574 return dd->rcv_err_status_cnt[14];
2575}
2576
2577static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2578 void *context, int vl,
2579 int mode, u64 data)
2580{
2581 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2582
2583 return dd->rcv_err_status_cnt[13];
2584}
2585
2586static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2587 void *context, int vl, int mode,
2588 u64 data)
2589{
2590 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2591
2592 return dd->rcv_err_status_cnt[12];
2593}
2594
2595static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2596 void *context, int vl, int mode,
2597 u64 data)
2598{
2599 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2600
2601 return dd->rcv_err_status_cnt[11];
2602}
2603
2604static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2605 void *context, int vl, int mode,
2606 u64 data)
2607{
2608 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2609
2610 return dd->rcv_err_status_cnt[10];
2611}
2612
2613static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2614 void *context, int vl, int mode,
2615 u64 data)
2616{
2617 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2618
2619 return dd->rcv_err_status_cnt[9];
2620}
2621
2622static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2623 void *context, int vl, int mode,
2624 u64 data)
2625{
2626 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2627
2628 return dd->rcv_err_status_cnt[8];
2629}
2630
2631static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2632 const struct cntr_entry *entry,
2633 void *context, int vl, int mode, u64 data)
2634{
2635 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2636
2637 return dd->rcv_err_status_cnt[7];
2638}
2639
2640static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2641 const struct cntr_entry *entry,
2642 void *context, int vl, int mode, u64 data)
2643{
2644 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2645
2646 return dd->rcv_err_status_cnt[6];
2647}
2648
2649static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2650 void *context, int vl, int mode,
2651 u64 data)
2652{
2653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2654
2655 return dd->rcv_err_status_cnt[5];
2656}
2657
2658static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2659 void *context, int vl, int mode,
2660 u64 data)
2661{
2662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2663
2664 return dd->rcv_err_status_cnt[4];
2665}
2666
2667static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2668 void *context, int vl, int mode,
2669 u64 data)
2670{
2671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2672
2673 return dd->rcv_err_status_cnt[3];
2674}
2675
2676static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2677 void *context, int vl, int mode,
2678 u64 data)
2679{
2680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2681
2682 return dd->rcv_err_status_cnt[2];
2683}
2684
2685static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2686 void *context, int vl, int mode,
2687 u64 data)
2688{
2689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2690
2691 return dd->rcv_err_status_cnt[1];
2692}
2693
2694static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2695 void *context, int vl, int mode,
2696 u64 data)
2697{
2698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2699
2700 return dd->rcv_err_status_cnt[0];
2701}
2702
2703/*
2704 * Software counters corresponding to each of the
2705 * error status bits within SendPioErrStatus
2706 */
2707static u64 access_pio_pec_sop_head_parity_err_cnt(
2708 const struct cntr_entry *entry,
2709 void *context, int vl, int mode, u64 data)
2710{
2711 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2712
2713 return dd->send_pio_err_status_cnt[35];
2714}
2715
2716static u64 access_pio_pcc_sop_head_parity_err_cnt(
2717 const struct cntr_entry *entry,
2718 void *context, int vl, int mode, u64 data)
2719{
2720 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2721
2722 return dd->send_pio_err_status_cnt[34];
2723}
2724
2725static u64 access_pio_last_returned_cnt_parity_err_cnt(
2726 const struct cntr_entry *entry,
2727 void *context, int vl, int mode, u64 data)
2728{
2729 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2730
2731 return dd->send_pio_err_status_cnt[33];
2732}
2733
2734static u64 access_pio_current_free_cnt_parity_err_cnt(
2735 const struct cntr_entry *entry,
2736 void *context, int vl, int mode, u64 data)
2737{
2738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2739
2740 return dd->send_pio_err_status_cnt[32];
2741}
2742
2743static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2744 void *context, int vl, int mode,
2745 u64 data)
2746{
2747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2748
2749 return dd->send_pio_err_status_cnt[31];
2750}
2751
2752static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2753 void *context, int vl, int mode,
2754 u64 data)
2755{
2756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2757
2758 return dd->send_pio_err_status_cnt[30];
2759}
2760
2761static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2762 void *context, int vl, int mode,
2763 u64 data)
2764{
2765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2766
2767 return dd->send_pio_err_status_cnt[29];
2768}
2769
2770static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2771 const struct cntr_entry *entry,
2772 void *context, int vl, int mode, u64 data)
2773{
2774 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2775
2776 return dd->send_pio_err_status_cnt[28];
2777}
2778
2779static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2780 void *context, int vl, int mode,
2781 u64 data)
2782{
2783 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2784
2785 return dd->send_pio_err_status_cnt[27];
2786}
2787
2788static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2789 void *context, int vl, int mode,
2790 u64 data)
2791{
2792 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2793
2794 return dd->send_pio_err_status_cnt[26];
2795}
2796
2797static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2798 void *context, int vl,
2799 int mode, u64 data)
2800{
2801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2802
2803 return dd->send_pio_err_status_cnt[25];
2804}
2805
2806static u64 access_pio_block_qw_count_parity_err_cnt(
2807 const struct cntr_entry *entry,
2808 void *context, int vl, int mode, u64 data)
2809{
2810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2811
2812 return dd->send_pio_err_status_cnt[24];
2813}
2814
2815static u64 access_pio_write_qw_valid_parity_err_cnt(
2816 const struct cntr_entry *entry,
2817 void *context, int vl, int mode, u64 data)
2818{
2819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2820
2821 return dd->send_pio_err_status_cnt[23];
2822}
2823
2824static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2825 void *context, int vl, int mode,
2826 u64 data)
2827{
2828 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2829
2830 return dd->send_pio_err_status_cnt[22];
2831}
2832
2833static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2834 void *context, int vl,
2835 int mode, u64 data)
2836{
2837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2838
2839 return dd->send_pio_err_status_cnt[21];
2840}
2841
2842static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2843 void *context, int vl,
2844 int mode, u64 data)
2845{
2846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2847
2848 return dd->send_pio_err_status_cnt[20];
2849}
2850
2851static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2852 void *context, int vl,
2853 int mode, u64 data)
2854{
2855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2856
2857 return dd->send_pio_err_status_cnt[19];
2858}
2859
2860static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2861 const struct cntr_entry *entry,
2862 void *context, int vl, int mode, u64 data)
2863{
2864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2865
2866 return dd->send_pio_err_status_cnt[18];
2867}
2868
2869static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2870 void *context, int vl, int mode,
2871 u64 data)
2872{
2873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2874
2875 return dd->send_pio_err_status_cnt[17];
2876}
2877
2878static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2879 void *context, int vl, int mode,
2880 u64 data)
2881{
2882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2883
2884 return dd->send_pio_err_status_cnt[16];
2885}
2886
2887static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2888 const struct cntr_entry *entry,
2889 void *context, int vl, int mode, u64 data)
2890{
2891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2892
2893 return dd->send_pio_err_status_cnt[15];
2894}
2895
2896static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2897 const struct cntr_entry *entry,
2898 void *context, int vl, int mode, u64 data)
2899{
2900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2901
2902 return dd->send_pio_err_status_cnt[14];
2903}
2904
2905static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2906 const struct cntr_entry *entry,
2907 void *context, int vl, int mode, u64 data)
2908{
2909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2910
2911 return dd->send_pio_err_status_cnt[13];
2912}
2913
2914static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2915 const struct cntr_entry *entry,
2916 void *context, int vl, int mode, u64 data)
2917{
2918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2919
2920 return dd->send_pio_err_status_cnt[12];
2921}
2922
2923static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2924 const struct cntr_entry *entry,
2925 void *context, int vl, int mode, u64 data)
2926{
2927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2928
2929 return dd->send_pio_err_status_cnt[11];
2930}
2931
2932static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2933 const struct cntr_entry *entry,
2934 void *context, int vl, int mode, u64 data)
2935{
2936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2937
2938 return dd->send_pio_err_status_cnt[10];
2939}
2940
2941static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2942 const struct cntr_entry *entry,
2943 void *context, int vl, int mode, u64 data)
2944{
2945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2946
2947 return dd->send_pio_err_status_cnt[9];
2948}
2949
2950static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2951 const struct cntr_entry *entry,
2952 void *context, int vl, int mode, u64 data)
2953{
2954 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2955
2956 return dd->send_pio_err_status_cnt[8];
2957}
2958
2959static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2960 const struct cntr_entry *entry,
2961 void *context, int vl, int mode, u64 data)
2962{
2963 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2964
2965 return dd->send_pio_err_status_cnt[7];
2966}
2967
2968static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2969 void *context, int vl, int mode,
2970 u64 data)
2971{
2972 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2973
2974 return dd->send_pio_err_status_cnt[6];
2975}
2976
2977static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2978 void *context, int vl, int mode,
2979 u64 data)
2980{
2981 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2982
2983 return dd->send_pio_err_status_cnt[5];
2984}
2985
2986static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2987 void *context, int vl, int mode,
2988 u64 data)
2989{
2990 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2991
2992 return dd->send_pio_err_status_cnt[4];
2993}
2994
2995static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2996 void *context, int vl, int mode,
2997 u64 data)
2998{
2999 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3000
3001 return dd->send_pio_err_status_cnt[3];
3002}
3003
3004static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3005 void *context, int vl, int mode,
3006 u64 data)
3007{
3008 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3009
3010 return dd->send_pio_err_status_cnt[2];
3011}
3012
3013static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3014 void *context, int vl,
3015 int mode, u64 data)
3016{
3017 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3018
3019 return dd->send_pio_err_status_cnt[1];
3020}
3021
3022static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3023 void *context, int vl, int mode,
3024 u64 data)
3025{
3026 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3027
3028 return dd->send_pio_err_status_cnt[0];
3029}
3030
3031/*
3032 * Software counters corresponding to each of the
3033 * error status bits within SendDmaErrStatus
3034 */
3035static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3036 const struct cntr_entry *entry,
3037 void *context, int vl, int mode, u64 data)
3038{
3039 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3040
3041 return dd->send_dma_err_status_cnt[3];
3042}
3043
3044static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3045 const struct cntr_entry *entry,
3046 void *context, int vl, int mode, u64 data)
3047{
3048 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3049
3050 return dd->send_dma_err_status_cnt[2];
3051}
3052
3053static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3054 void *context, int vl, int mode,
3055 u64 data)
3056{
3057 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3058
3059 return dd->send_dma_err_status_cnt[1];
3060}
3061
3062static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3063 void *context, int vl, int mode,
3064 u64 data)
3065{
3066 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3067
3068 return dd->send_dma_err_status_cnt[0];
3069}
3070
3071/*
3072 * Software counters corresponding to each of the
3073 * error status bits within SendEgressErrStatus
3074 */
3075static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3076 const struct cntr_entry *entry,
3077 void *context, int vl, int mode, u64 data)
3078{
3079 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3080
3081 return dd->send_egress_err_status_cnt[63];
3082}
3083
3084static u64 access_tx_read_sdma_memory_csr_err_cnt(
3085 const struct cntr_entry *entry,
3086 void *context, int vl, int mode, u64 data)
3087{
3088 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3089
3090 return dd->send_egress_err_status_cnt[62];
3091}
3092
3093static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3094 void *context, int vl, int mode,
3095 u64 data)
3096{
3097 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3098
3099 return dd->send_egress_err_status_cnt[61];
3100}
3101
3102static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3103 void *context, int vl,
3104 int mode, u64 data)
3105{
3106 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3107
3108 return dd->send_egress_err_status_cnt[60];
3109}
3110
3111static u64 access_tx_read_sdma_memory_cor_err_cnt(
3112 const struct cntr_entry *entry,
3113 void *context, int vl, int mode, u64 data)
3114{
3115 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3116
3117 return dd->send_egress_err_status_cnt[59];
3118}
3119
3120static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3121 void *context, int vl, int mode,
3122 u64 data)
3123{
3124 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3125
3126 return dd->send_egress_err_status_cnt[58];
3127}
3128
3129static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3130 void *context, int vl, int mode,
3131 u64 data)
3132{
3133 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3134
3135 return dd->send_egress_err_status_cnt[57];
3136}
3137
3138static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3139 void *context, int vl, int mode,
3140 u64 data)
3141{
3142 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3143
3144 return dd->send_egress_err_status_cnt[56];
3145}
3146
3147static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3148 void *context, int vl, int mode,
3149 u64 data)
3150{
3151 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3152
3153 return dd->send_egress_err_status_cnt[55];
3154}
3155
3156static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3157 void *context, int vl, int mode,
3158 u64 data)
3159{
3160 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3161
3162 return dd->send_egress_err_status_cnt[54];
3163}
3164
3165static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3166 void *context, int vl, int mode,
3167 u64 data)
3168{
3169 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3170
3171 return dd->send_egress_err_status_cnt[53];
3172}
3173
3174static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3175 void *context, int vl, int mode,
3176 u64 data)
3177{
3178 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3179
3180 return dd->send_egress_err_status_cnt[52];
3181}
3182
3183static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3184 void *context, int vl, int mode,
3185 u64 data)
3186{
3187 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3188
3189 return dd->send_egress_err_status_cnt[51];
3190}
3191
3192static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3193 void *context, int vl, int mode,
3194 u64 data)
3195{
3196 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3197
3198 return dd->send_egress_err_status_cnt[50];
3199}
3200
3201static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3202 void *context, int vl, int mode,
3203 u64 data)
3204{
3205 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3206
3207 return dd->send_egress_err_status_cnt[49];
3208}
3209
3210static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3211 void *context, int vl, int mode,
3212 u64 data)
3213{
3214 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3215
3216 return dd->send_egress_err_status_cnt[48];
3217}
3218
3219static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3220 void *context, int vl, int mode,
3221 u64 data)
3222{
3223 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3224
3225 return dd->send_egress_err_status_cnt[47];
3226}
3227
3228static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3229 void *context, int vl, int mode,
3230 u64 data)
3231{
3232 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3233
3234 return dd->send_egress_err_status_cnt[46];
3235}
3236
3237static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3238 void *context, int vl, int mode,
3239 u64 data)
3240{
3241 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3242
3243 return dd->send_egress_err_status_cnt[45];
3244}
3245
3246static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3247 void *context, int vl,
3248 int mode, u64 data)
3249{
3250 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3251
3252 return dd->send_egress_err_status_cnt[44];
3253}
3254
3255static u64 access_tx_read_sdma_memory_unc_err_cnt(
3256 const struct cntr_entry *entry,
3257 void *context, int vl, int mode, u64 data)
3258{
3259 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3260
3261 return dd->send_egress_err_status_cnt[43];
3262}
3263
3264static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3265 void *context, int vl, int mode,
3266 u64 data)
3267{
3268 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3269
3270 return dd->send_egress_err_status_cnt[42];
3271}
3272
3273static u64 access_tx_credit_return_parity_err_cnt(
3274 const struct cntr_entry *entry,
3275 void *context, int vl, int mode, u64 data)
3276{
3277 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3278
3279 return dd->send_egress_err_status_cnt[41];
3280}
3281
3282static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3283 const struct cntr_entry *entry,
3284 void *context, int vl, int mode, u64 data)
3285{
3286 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3287
3288 return dd->send_egress_err_status_cnt[40];
3289}
3290
3291static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3292 const struct cntr_entry *entry,
3293 void *context, int vl, int mode, u64 data)
3294{
3295 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3296
3297 return dd->send_egress_err_status_cnt[39];
3298}
3299
3300static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3301 const struct cntr_entry *entry,
3302 void *context, int vl, int mode, u64 data)
3303{
3304 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3305
3306 return dd->send_egress_err_status_cnt[38];
3307}
3308
3309static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3310 const struct cntr_entry *entry,
3311 void *context, int vl, int mode, u64 data)
3312{
3313 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3314
3315 return dd->send_egress_err_status_cnt[37];
3316}
3317
3318static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3319 const struct cntr_entry *entry,
3320 void *context, int vl, int mode, u64 data)
3321{
3322 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3323
3324 return dd->send_egress_err_status_cnt[36];
3325}
3326
3327static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3328 const struct cntr_entry *entry,
3329 void *context, int vl, int mode, u64 data)
3330{
3331 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3332
3333 return dd->send_egress_err_status_cnt[35];
3334}
3335
3336static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3337 const struct cntr_entry *entry,
3338 void *context, int vl, int mode, u64 data)
3339{
3340 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3341
3342 return dd->send_egress_err_status_cnt[34];
3343}
3344
3345static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3346 const struct cntr_entry *entry,
3347 void *context, int vl, int mode, u64 data)
3348{
3349 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3350
3351 return dd->send_egress_err_status_cnt[33];
3352}
3353
3354static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3355 const struct cntr_entry *entry,
3356 void *context, int vl, int mode, u64 data)
3357{
3358 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3359
3360 return dd->send_egress_err_status_cnt[32];
3361}
3362
3363static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3364 const struct cntr_entry *entry,
3365 void *context, int vl, int mode, u64 data)
3366{
3367 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3368
3369 return dd->send_egress_err_status_cnt[31];
3370}
3371
3372static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3373 const struct cntr_entry *entry,
3374 void *context, int vl, int mode, u64 data)
3375{
3376 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3377
3378 return dd->send_egress_err_status_cnt[30];
3379}
3380
3381static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3382 const struct cntr_entry *entry,
3383 void *context, int vl, int mode, u64 data)
3384{
3385 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3386
3387 return dd->send_egress_err_status_cnt[29];
3388}
3389
3390static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3391 const struct cntr_entry *entry,
3392 void *context, int vl, int mode, u64 data)
3393{
3394 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3395
3396 return dd->send_egress_err_status_cnt[28];
3397}
3398
3399static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3400 const struct cntr_entry *entry,
3401 void *context, int vl, int mode, u64 data)
3402{
3403 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3404
3405 return dd->send_egress_err_status_cnt[27];
3406}
3407
3408static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3409 const struct cntr_entry *entry,
3410 void *context, int vl, int mode, u64 data)
3411{
3412 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3413
3414 return dd->send_egress_err_status_cnt[26];
3415}
3416
3417static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3418 const struct cntr_entry *entry,
3419 void *context, int vl, int mode, u64 data)
3420{
3421 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3422
3423 return dd->send_egress_err_status_cnt[25];
3424}
3425
3426static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3427 const struct cntr_entry *entry,
3428 void *context, int vl, int mode, u64 data)
3429{
3430 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3431
3432 return dd->send_egress_err_status_cnt[24];
3433}
3434
3435static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3436 const struct cntr_entry *entry,
3437 void *context, int vl, int mode, u64 data)
3438{
3439 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3440
3441 return dd->send_egress_err_status_cnt[23];
3442}
3443
3444static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3445 const struct cntr_entry *entry,
3446 void *context, int vl, int mode, u64 data)
3447{
3448 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3449
3450 return dd->send_egress_err_status_cnt[22];
3451}
3452
3453static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3454 const struct cntr_entry *entry,
3455 void *context, int vl, int mode, u64 data)
3456{
3457 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3458
3459 return dd->send_egress_err_status_cnt[21];
3460}
3461
3462static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3463 const struct cntr_entry *entry,
3464 void *context, int vl, int mode, u64 data)
3465{
3466 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3467
3468 return dd->send_egress_err_status_cnt[20];
3469}
3470
3471static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3472 const struct cntr_entry *entry,
3473 void *context, int vl, int mode, u64 data)
3474{
3475 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3476
3477 return dd->send_egress_err_status_cnt[19];
3478}
3479
3480static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3481 const struct cntr_entry *entry,
3482 void *context, int vl, int mode, u64 data)
3483{
3484 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3485
3486 return dd->send_egress_err_status_cnt[18];
3487}
3488
3489static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3490 const struct cntr_entry *entry,
3491 void *context, int vl, int mode, u64 data)
3492{
3493 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3494
3495 return dd->send_egress_err_status_cnt[17];
3496}
3497
3498static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3499 const struct cntr_entry *entry,
3500 void *context, int vl, int mode, u64 data)
3501{
3502 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3503
3504 return dd->send_egress_err_status_cnt[16];
3505}
3506
3507static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3508 void *context, int vl, int mode,
3509 u64 data)
3510{
3511 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3512
3513 return dd->send_egress_err_status_cnt[15];
3514}
3515
3516static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3517 void *context, int vl,
3518 int mode, u64 data)
3519{
3520 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3521
3522 return dd->send_egress_err_status_cnt[14];
3523}
3524
3525static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3526 void *context, int vl, int mode,
3527 u64 data)
3528{
3529 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3530
3531 return dd->send_egress_err_status_cnt[13];
3532}
3533
3534static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3535 void *context, int vl, int mode,
3536 u64 data)
3537{
3538 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3539
3540 return dd->send_egress_err_status_cnt[12];
3541}
3542
3543static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3544 const struct cntr_entry *entry,
3545 void *context, int vl, int mode, u64 data)
3546{
3547 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3548
3549 return dd->send_egress_err_status_cnt[11];
3550}
3551
3552static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3553 void *context, int vl, int mode,
3554 u64 data)
3555{
3556 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3557
3558 return dd->send_egress_err_status_cnt[10];
3559}
3560
3561static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3562 void *context, int vl, int mode,
3563 u64 data)
3564{
3565 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3566
3567 return dd->send_egress_err_status_cnt[9];
3568}
3569
3570static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3571 const struct cntr_entry *entry,
3572 void *context, int vl, int mode, u64 data)
3573{
3574 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3575
3576 return dd->send_egress_err_status_cnt[8];
3577}
3578
3579static u64 access_tx_pio_launch_intf_parity_err_cnt(
3580 const struct cntr_entry *entry,
3581 void *context, int vl, int mode, u64 data)
3582{
3583 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3584
3585 return dd->send_egress_err_status_cnt[7];
3586}
3587
3588static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3589 void *context, int vl, int mode,
3590 u64 data)
3591{
3592 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3593
3594 return dd->send_egress_err_status_cnt[6];
3595}
3596
3597static u64 access_tx_incorrect_link_state_err_cnt(
3598 const struct cntr_entry *entry,
3599 void *context, int vl, int mode, u64 data)
3600{
3601 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3602
3603 return dd->send_egress_err_status_cnt[5];
3604}
3605
3606static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3607 void *context, int vl, int mode,
3608 u64 data)
3609{
3610 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3611
3612 return dd->send_egress_err_status_cnt[4];
3613}
3614
3615static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3616 const struct cntr_entry *entry,
3617 void *context, int vl, int mode, u64 data)
3618{
3619 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3620
3621 return dd->send_egress_err_status_cnt[3];
3622}
3623
3624static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3625 void *context, int vl, int mode,
3626 u64 data)
3627{
3628 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3629
3630 return dd->send_egress_err_status_cnt[2];
3631}
3632
3633static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3634 const struct cntr_entry *entry,
3635 void *context, int vl, int mode, u64 data)
3636{
3637 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3638
3639 return dd->send_egress_err_status_cnt[1];
3640}
3641
3642static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3643 const struct cntr_entry *entry,
3644 void *context, int vl, int mode, u64 data)
3645{
3646 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3647
3648 return dd->send_egress_err_status_cnt[0];
3649}
3650
3651/*
3652 * Software counters corresponding to each of the
3653 * error status bits within SendErrStatus
3654 */
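/*
 * Only bits 2..0 of SendErrStatus are counted here: CSR write bad
 * address, CSR read bad address and CSR parity.
 */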
3655static u64 access_send_csr_write_bad_addr_err_cnt(
3656 const struct cntr_entry *entry,
3657 void *context, int vl, int mode, u64 data)
3658{
3659 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3660
3661 return dd->send_err_status_cnt[2];
3662}
3663
3664static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3665 void *context, int vl,
3666 int mode, u64 data)
3667{
3668 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3669
3670 return dd->send_err_status_cnt[1];
3671}
3672
3673static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3674 void *context, int vl, int mode,
3675 u64 data)
3676{
3677 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3678
3679 return dd->send_err_status_cnt[0];
3680}
3681
3682/*
3683 * Software counters corresponding to each of the
3684 * error status bits within SendCtxtErrStatus
3685 */
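/*
 * sw_ctxt_err_status_cnt[4..0] accumulates the per-context PIO write
 * errors (out of bounds, overflow, crosses boundary, disallowed packet,
 * inconsistent SOP) for the device as a whole.
 */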
3686static u64 access_pio_write_out_of_bounds_err_cnt(
3687 const struct cntr_entry *entry,
3688 void *context, int vl, int mode, u64 data)
3689{
3690 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3691
3692 return dd->sw_ctxt_err_status_cnt[4];
3693}
3694
3695static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3696 void *context, int vl, int mode,
3697 u64 data)
3698{
3699 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3700
3701 return dd->sw_ctxt_err_status_cnt[3];
3702}
3703
3704static u64 access_pio_write_crosses_boundary_err_cnt(
3705 const struct cntr_entry *entry,
3706 void *context, int vl, int mode, u64 data)
3707{
3708 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3709
3710 return dd->sw_ctxt_err_status_cnt[2];
3711}
3712
3713static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3714 void *context, int vl,
3715 int mode, u64 data)
3716{
3717 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3718
3719 return dd->sw_ctxt_err_status_cnt[1];
3720}
3721
3722static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3723 void *context, int vl, int mode,
3724 u64 data)
3725{
3726 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3727
3728 return dd->sw_ctxt_err_status_cnt[0];
3729}
3730
3731/*
3732 * Software counters corresponding to each of the
3733 * error status bits within SendDmaEngErrStatus
3734 */
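/*
 * sw_send_dma_eng_err_status_cnt[23..0] follows the same convention:
 * one device-wide software counter per SendDmaEngErrStatus bit.
 */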
3735static u64 access_sdma_header_request_fifo_cor_err_cnt(
3736 const struct cntr_entry *entry,
3737 void *context, int vl, int mode, u64 data)
3738{
3739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740
3741 return dd->sw_send_dma_eng_err_status_cnt[23];
3742}
3743
3744static u64 access_sdma_header_storage_cor_err_cnt(
3745 const struct cntr_entry *entry,
3746 void *context, int vl, int mode, u64 data)
3747{
3748 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3749
3750 return dd->sw_send_dma_eng_err_status_cnt[22];
3751}
3752
3753static u64 access_sdma_packet_tracking_cor_err_cnt(
3754 const struct cntr_entry *entry,
3755 void *context, int vl, int mode, u64 data)
3756{
3757 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3758
3759 return dd->sw_send_dma_eng_err_status_cnt[21];
3760}
3761
3762static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3763 void *context, int vl, int mode,
3764 u64 data)
3765{
3766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3767
3768 return dd->sw_send_dma_eng_err_status_cnt[20];
3769}
3770
3771static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3772 void *context, int vl, int mode,
3773 u64 data)
3774{
3775 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3776
3777 return dd->sw_send_dma_eng_err_status_cnt[19];
3778}
3779
3780static u64 access_sdma_header_request_fifo_unc_err_cnt(
3781 const struct cntr_entry *entry,
3782 void *context, int vl, int mode, u64 data)
3783{
3784 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3785
3786 return dd->sw_send_dma_eng_err_status_cnt[18];
3787}
3788
3789static u64 access_sdma_header_storage_unc_err_cnt(
3790 const struct cntr_entry *entry,
3791 void *context, int vl, int mode, u64 data)
3792{
3793 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3794
3795 return dd->sw_send_dma_eng_err_status_cnt[17];
3796}
3797
3798static u64 access_sdma_packet_tracking_unc_err_cnt(
3799 const struct cntr_entry *entry,
3800 void *context, int vl, int mode, u64 data)
3801{
3802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3803
3804 return dd->sw_send_dma_eng_err_status_cnt[16];
3805}
3806
3807static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3808 void *context, int vl, int mode,
3809 u64 data)
3810{
3811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3812
3813 return dd->sw_send_dma_eng_err_status_cnt[15];
3814}
3815
3816static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3817 void *context, int vl, int mode,
3818 u64 data)
3819{
3820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3821
3822 return dd->sw_send_dma_eng_err_status_cnt[14];
3823}
3824
3825static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3826 void *context, int vl, int mode,
3827 u64 data)
3828{
3829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3830
3831 return dd->sw_send_dma_eng_err_status_cnt[13];
3832}
3833
3834static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3835 void *context, int vl, int mode,
3836 u64 data)
3837{
3838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3839
3840 return dd->sw_send_dma_eng_err_status_cnt[12];
3841}
3842
3843static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3844 void *context, int vl, int mode,
3845 u64 data)
3846{
3847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3848
3849 return dd->sw_send_dma_eng_err_status_cnt[11];
3850}
3851
3852static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3853 void *context, int vl, int mode,
3854 u64 data)
3855{
3856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3857
3858 return dd->sw_send_dma_eng_err_status_cnt[10];
3859}
3860
3861static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3862 void *context, int vl, int mode,
3863 u64 data)
3864{
3865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3866
3867 return dd->sw_send_dma_eng_err_status_cnt[9];
3868}
3869
3870static u64 access_sdma_packet_desc_overflow_err_cnt(
3871 const struct cntr_entry *entry,
3872 void *context, int vl, int mode, u64 data)
3873{
3874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3875
3876 return dd->sw_send_dma_eng_err_status_cnt[8];
3877}
3878
3879static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3880 void *context, int vl,
3881 int mode, u64 data)
3882{
3883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3884
3885 return dd->sw_send_dma_eng_err_status_cnt[7];
3886}
3887
3888static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3889 void *context, int vl, int mode, u64 data)
3890{
3891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3892
3893 return dd->sw_send_dma_eng_err_status_cnt[6];
3894}
3895
3896static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3897 void *context, int vl, int mode,
3898 u64 data)
3899{
3900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3901
3902 return dd->sw_send_dma_eng_err_status_cnt[5];
3903}
3904
3905static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3906 void *context, int vl, int mode,
3907 u64 data)
3908{
3909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3910
3911 return dd->sw_send_dma_eng_err_status_cnt[4];
3912}
3913
3914static u64 access_sdma_tail_out_of_bounds_err_cnt(
3915 const struct cntr_entry *entry,
3916 void *context, int vl, int mode, u64 data)
3917{
3918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3919
3920 return dd->sw_send_dma_eng_err_status_cnt[3];
3921}
3922
3923static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3924 void *context, int vl, int mode,
3925 u64 data)
3926{
3927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3928
3929 return dd->sw_send_dma_eng_err_status_cnt[2];
3930}
3931
3932static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3933 void *context, int vl, int mode,
3934 u64 data)
3935{
3936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3937
3938 return dd->sw_send_dma_eng_err_status_cnt[1];
3939}
3940
3941static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3942 void *context, int vl, int mode,
3943 u64 data)
3944{
3945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3946
3947 return dd->sw_send_dma_eng_err_status_cnt[0];
3948}
3949
3950#define def_access_sw_cpu(cntr) \
3951static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3952 void *context, int vl, int mode, u64 data) \
3953{ \
3954 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3955 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3956 ppd->ibport_data.rvp.cntr, vl, \
3957 mode, data); \
3958}
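/*
 * For example, def_access_sw_cpu(rc_acks) expands (roughly) to:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */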
3959
3960def_access_sw_cpu(rc_acks);
3961def_access_sw_cpu(rc_qacks);
3962def_access_sw_cpu(rc_delayed_comp);
3963
3964#define def_access_ibp_counter(cntr) \
3965static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3966 void *context, int vl, int mode, u64 data) \
3967{ \
3968 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3969 \
3970 if (vl != CNTR_INVALID_VL) \
3971 return 0; \
3972 \
3973 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
3974 mode, data); \
3975}
3976
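/*
 * Unlike the per-CPU counters above, the ibp counters have no per-VL
 * breakdown: the generated access_ibp_##cntr() returns 0 for any
 * specific VL and otherwise hands ppd->ibport_data.rvp.n_##cntr to
 * read_write_sw().  def_access_ibp_counter(loop_pkts), for instance,
 * produces access_ibp_loop_pkts() backed by rvp.n_loop_pkts.
 */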
3977def_access_ibp_counter(loop_pkts);
3978def_access_ibp_counter(rc_resends);
3979def_access_ibp_counter(rnr_naks);
3980def_access_ibp_counter(other_naks);
3981def_access_ibp_counter(rc_timeouts);
3982def_access_ibp_counter(pkt_drops);
3983def_access_ibp_counter(dmawait);
3984def_access_ibp_counter(rc_seqnak);
3985def_access_ibp_counter(rc_dupreq);
3986def_access_ibp_counter(rdma_seq);
3987def_access_ibp_counter(unaligned);
3988def_access_ibp_counter(seq_naks);
3989
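/*
 * Device counter table, indexed by the C_* enum.  Each CNTR_ELEM()
 * entry ties a counter name and its CNTR_* flags to the routine used
 * to read (or write) it; hardware-backed counters pass their CSR
 * (e.g. SEND_DMA_DESC_FETCHED_CNT), while software-only counters pass
 * 0 and rely entirely on their access_* helper.
 */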
3990static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3991[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3992[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3993 CNTR_NORMAL),
3994[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3995 CNTR_NORMAL),
3996[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3997 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3998 CNTR_NORMAL),
3999[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4000 CNTR_NORMAL),
4001[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4002 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4003[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4004 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4005[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4006 CNTR_NORMAL),
4007[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4008 CNTR_NORMAL),
4009[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4010 CNTR_NORMAL),
4011[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4012 CNTR_NORMAL),
4013[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4014 CNTR_NORMAL),
4015[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4016 CNTR_NORMAL),
4017[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4018 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4019[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4020 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4021[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4022 CNTR_SYNTH),
4023[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4024[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4025 CNTR_SYNTH),
4026[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4027 CNTR_SYNTH),
4028[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4029 CNTR_SYNTH),
4030[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4031 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4032[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4033 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4034 CNTR_SYNTH),
4035[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4036 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4037[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4038 CNTR_SYNTH),
4039[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4040 CNTR_SYNTH),
4041[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4042 CNTR_SYNTH),
4043[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4044 CNTR_SYNTH),
4045[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4046 CNTR_SYNTH),
4047[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4048 CNTR_SYNTH),
4049[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4050 CNTR_SYNTH),
4051[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4052 CNTR_SYNTH | CNTR_VL),
4053[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4054 CNTR_SYNTH | CNTR_VL),
4055[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4056[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4057 CNTR_SYNTH | CNTR_VL),
4058[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4059[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4060 CNTR_SYNTH | CNTR_VL),
4061[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4062 CNTR_SYNTH),
4063[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4064 CNTR_SYNTH | CNTR_VL),
4065[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4066 CNTR_SYNTH),
4067[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4068 CNTR_SYNTH | CNTR_VL),
4069[C_DC_TOTAL_CRC] =
4070 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4071 CNTR_SYNTH),
4072[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4073 CNTR_SYNTH),
4074[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4075 CNTR_SYNTH),
4076[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4077 CNTR_SYNTH),
4078[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4079 CNTR_SYNTH),
4080[C_DC_CRC_MULT_LN] =
4081 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4082 CNTR_SYNTH),
4083[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4084 CNTR_SYNTH),
4085[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4086 CNTR_SYNTH),
4087[C_DC_SEQ_CRC_CNT] =
4088 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4089 CNTR_SYNTH),
4090[C_DC_ESC0_ONLY_CNT] =
4091 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4092 CNTR_SYNTH),
4093[C_DC_ESC0_PLUS1_CNT] =
4094 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4095 CNTR_SYNTH),
4096[C_DC_ESC0_PLUS2_CNT] =
4097 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4098 CNTR_SYNTH),
4099[C_DC_REINIT_FROM_PEER_CNT] =
4100 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4101 CNTR_SYNTH),
4102[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4103 CNTR_SYNTH),
4104[C_DC_MISC_FLG_CNT] =
4105 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4106 CNTR_SYNTH),
4107[C_DC_PRF_GOOD_LTP_CNT] =
4108 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4109[C_DC_PRF_ACCEPTED_LTP_CNT] =
4110 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4111 CNTR_SYNTH),
4112[C_DC_PRF_RX_FLIT_CNT] =
4113 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4114[C_DC_PRF_TX_FLIT_CNT] =
4115 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4116[C_DC_PRF_CLK_CNTR] =
4117 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4118[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4119 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4120[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4121 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4122 CNTR_SYNTH),
4123[C_DC_PG_STS_TX_SBE_CNT] =
4124 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4125[C_DC_PG_STS_TX_MBE_CNT] =
4126 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4127 CNTR_SYNTH),
4128[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4129 access_sw_cpu_intr),
4130[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4131 access_sw_cpu_rcv_limit),
4132[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4133 access_sw_vtx_wait),
4134[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4135 access_sw_pio_wait),
4136[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4137 access_sw_pio_drain),
4138[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4139 access_sw_kmem_wait),
4140[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4141 access_sw_send_schedule),
4142[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4143 SEND_DMA_DESC_FETCHED_CNT, 0,
4144 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4145 dev_access_u32_csr),
4146[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4147 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4148 access_sde_int_cnt),
4149[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4150 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4151 access_sde_err_cnt),
4152[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4153 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4154 access_sde_idle_int_cnt),
4155[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4156 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4157 access_sde_progress_int_cnt),
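/*
 * The entries below expose the per-bit error-status software counters
 * defined earlier (MISC, CCE, RcvErr, SendPio, SendDma, SendEgress and
 * so on).  They have no CSR of their own, so csr/offset are 0 and each
 * entry simply names its access_*_err_cnt() helper.
 */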
4158/* MISC_ERR_STATUS */
4159[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4160 CNTR_NORMAL,
4161 access_misc_pll_lock_fail_err_cnt),
4162[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4163 CNTR_NORMAL,
4164 access_misc_mbist_fail_err_cnt),
4165[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4166 CNTR_NORMAL,
4167 access_misc_invalid_eep_cmd_err_cnt),
4168[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4169 CNTR_NORMAL,
4170 access_misc_efuse_done_parity_err_cnt),
4171[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4172 CNTR_NORMAL,
4173 access_misc_efuse_write_err_cnt),
4174[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4175 0, CNTR_NORMAL,
4176 access_misc_efuse_read_bad_addr_err_cnt),
4177[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4178 CNTR_NORMAL,
4179 access_misc_efuse_csr_parity_err_cnt),
4180[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4181 CNTR_NORMAL,
4182 access_misc_fw_auth_failed_err_cnt),
4183[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4184 CNTR_NORMAL,
4185 access_misc_key_mismatch_err_cnt),
4186[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4187 CNTR_NORMAL,
4188 access_misc_sbus_write_failed_err_cnt),
4189[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4190 CNTR_NORMAL,
4191 access_misc_csr_write_bad_addr_err_cnt),
4192[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4193 CNTR_NORMAL,
4194 access_misc_csr_read_bad_addr_err_cnt),
4195[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4196 CNTR_NORMAL,
4197 access_misc_csr_parity_err_cnt),
4198/* CceErrStatus */
4199[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4200 CNTR_NORMAL,
4201 access_sw_cce_err_status_aggregated_cnt),
4202[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4203 CNTR_NORMAL,
4204 access_cce_msix_csr_parity_err_cnt),
4205[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4206 CNTR_NORMAL,
4207 access_cce_int_map_unc_err_cnt),
4208[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4209 CNTR_NORMAL,
4210 access_cce_int_map_cor_err_cnt),
4211[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4212 CNTR_NORMAL,
4213 access_cce_msix_table_unc_err_cnt),
4214[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4215 CNTR_NORMAL,
4216 access_cce_msix_table_cor_err_cnt),
4217[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4218 0, CNTR_NORMAL,
4219 access_cce_rxdma_conv_fifo_parity_err_cnt),
4220[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4221 0, CNTR_NORMAL,
4222 access_cce_rcpl_async_fifo_parity_err_cnt),
4223[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4224 CNTR_NORMAL,
4225 access_cce_seg_write_bad_addr_err_cnt),
4226[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4227 CNTR_NORMAL,
4228 access_cce_seg_read_bad_addr_err_cnt),
4229[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4230 CNTR_NORMAL,
4231 access_la_triggered_cnt),
4232[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4233 CNTR_NORMAL,
4234 access_cce_trgt_cpl_timeout_err_cnt),
4235[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4236 CNTR_NORMAL,
4237 access_pcic_receive_parity_err_cnt),
4238[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4239 CNTR_NORMAL,
4240 access_pcic_transmit_back_parity_err_cnt),
4241[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4242 0, CNTR_NORMAL,
4243 access_pcic_transmit_front_parity_err_cnt),
4244[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4245 CNTR_NORMAL,
4246 access_pcic_cpl_dat_q_unc_err_cnt),
4247[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4248 CNTR_NORMAL,
4249 access_pcic_cpl_hd_q_unc_err_cnt),
4250[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4251 CNTR_NORMAL,
4252 access_pcic_post_dat_q_unc_err_cnt),
4253[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4254 CNTR_NORMAL,
4255 access_pcic_post_hd_q_unc_err_cnt),
4256[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4257 CNTR_NORMAL,
4258 access_pcic_retry_sot_mem_unc_err_cnt),
4259[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4260 CNTR_NORMAL,
4261 access_pcic_retry_mem_unc_err),
4262[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4263 CNTR_NORMAL,
4264 access_pcic_n_post_dat_q_parity_err_cnt),
4265[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4266 CNTR_NORMAL,
4267 access_pcic_n_post_h_q_parity_err_cnt),
4268[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4269 CNTR_NORMAL,
4270 access_pcic_cpl_dat_q_cor_err_cnt),
4271[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4272 CNTR_NORMAL,
4273 access_pcic_cpl_hd_q_cor_err_cnt),
4274[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4275 CNTR_NORMAL,
4276 access_pcic_post_dat_q_cor_err_cnt),
4277[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4278 CNTR_NORMAL,
4279 access_pcic_post_hd_q_cor_err_cnt),
4280[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4281 CNTR_NORMAL,
4282 access_pcic_retry_sot_mem_cor_err_cnt),
4283[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4284 CNTR_NORMAL,
4285 access_pcic_retry_mem_cor_err_cnt),
4286[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4287 "CceCli1AsyncFifoDbgParityError", 0, 0,
4288 CNTR_NORMAL,
4289 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4290[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4291 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4292 CNTR_NORMAL,
4293 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4294 ),
4295[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4296 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4297 CNTR_NORMAL,
4298 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4299[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4300 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4301 CNTR_NORMAL,
4302 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4303[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4304 0, CNTR_NORMAL,
4305 access_cce_cli2_async_fifo_parity_err_cnt),
4306[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4307 CNTR_NORMAL,
4308 access_cce_csr_cfg_bus_parity_err_cnt),
4309[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4310 0, CNTR_NORMAL,
4311 access_cce_cli0_async_fifo_parity_err_cnt),
4312[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4313 CNTR_NORMAL,
4314 access_cce_rspd_data_parity_err_cnt),
4315[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4316 CNTR_NORMAL,
4317 access_cce_trgt_access_err_cnt),
4318[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4319 0, CNTR_NORMAL,
4320 access_cce_trgt_async_fifo_parity_err_cnt),
4321[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4322 CNTR_NORMAL,
4323 access_cce_csr_write_bad_addr_err_cnt),
4324[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4325 CNTR_NORMAL,
4326 access_cce_csr_read_bad_addr_err_cnt),
4327[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4328 CNTR_NORMAL,
4329 access_ccs_csr_parity_err_cnt),
4330
4331/* RcvErrStatus */
4332[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4333 CNTR_NORMAL,
4334 access_rx_csr_parity_err_cnt),
4335[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4336 CNTR_NORMAL,
4337 access_rx_csr_write_bad_addr_err_cnt),
4338[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4339 CNTR_NORMAL,
4340 access_rx_csr_read_bad_addr_err_cnt),
4341[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4342 CNTR_NORMAL,
4343 access_rx_dma_csr_unc_err_cnt),
4344[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4345 CNTR_NORMAL,
4346 access_rx_dma_dq_fsm_encoding_err_cnt),
4347[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4348 CNTR_NORMAL,
4349 access_rx_dma_eq_fsm_encoding_err_cnt),
4350[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4351 CNTR_NORMAL,
4352 access_rx_dma_csr_parity_err_cnt),
4353[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4354 CNTR_NORMAL,
4355 access_rx_rbuf_data_cor_err_cnt),
4356[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4357 CNTR_NORMAL,
4358 access_rx_rbuf_data_unc_err_cnt),
4359[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4360 CNTR_NORMAL,
4361 access_rx_dma_data_fifo_rd_cor_err_cnt),
4362[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4363 CNTR_NORMAL,
4364 access_rx_dma_data_fifo_rd_unc_err_cnt),
4365[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4366 CNTR_NORMAL,
4367 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4368[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4369 CNTR_NORMAL,
4370 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4371[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4372 CNTR_NORMAL,
4373 access_rx_rbuf_desc_part2_cor_err_cnt),
4374[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4375 CNTR_NORMAL,
4376 access_rx_rbuf_desc_part2_unc_err_cnt),
4377[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4378 CNTR_NORMAL,
4379 access_rx_rbuf_desc_part1_cor_err_cnt),
4380[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4381 CNTR_NORMAL,
4382 access_rx_rbuf_desc_part1_unc_err_cnt),
4383[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4384 CNTR_NORMAL,
4385 access_rx_hq_intr_fsm_err_cnt),
4386[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4387 CNTR_NORMAL,
4388 access_rx_hq_intr_csr_parity_err_cnt),
4389[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4390 CNTR_NORMAL,
4391 access_rx_lookup_csr_parity_err_cnt),
4392[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4393 CNTR_NORMAL,
4394 access_rx_lookup_rcv_array_cor_err_cnt),
4395[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4396 CNTR_NORMAL,
4397 access_rx_lookup_rcv_array_unc_err_cnt),
4398[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4399 0, CNTR_NORMAL,
4400 access_rx_lookup_des_part2_parity_err_cnt),
4401[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4402 0, CNTR_NORMAL,
4403 access_rx_lookup_des_part1_unc_cor_err_cnt),
4404[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4405 CNTR_NORMAL,
4406 access_rx_lookup_des_part1_unc_err_cnt),
4407[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4408 CNTR_NORMAL,
4409 access_rx_rbuf_next_free_buf_cor_err_cnt),
4410[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4411 CNTR_NORMAL,
4412 access_rx_rbuf_next_free_buf_unc_err_cnt),
4413[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4414 "RxRbufFlInitWrAddrParityErr", 0, 0,
4415 CNTR_NORMAL,
4416 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4417[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4418 0, CNTR_NORMAL,
4419 access_rx_rbuf_fl_initdone_parity_err_cnt),
4420[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4421 0, CNTR_NORMAL,
4422 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4423[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4424 CNTR_NORMAL,
4425 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4426[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4427 CNTR_NORMAL,
4428 access_rx_rbuf_empty_err_cnt),
4429[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4430 CNTR_NORMAL,
4431 access_rx_rbuf_full_err_cnt),
4432[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4433 CNTR_NORMAL,
4434 access_rbuf_bad_lookup_err_cnt),
4435[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4436 CNTR_NORMAL,
4437 access_rbuf_ctx_id_parity_err_cnt),
4438[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4439 CNTR_NORMAL,
4440 access_rbuf_csr_qeopdw_parity_err_cnt),
4441[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4442 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4443 CNTR_NORMAL,
4444 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4445[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4446 "RxRbufCsrQTlPtrParityErr", 0, 0,
4447 CNTR_NORMAL,
4448 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4449[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4450 0, CNTR_NORMAL,
4451 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4452[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4453 0, CNTR_NORMAL,
4454 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4455[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4456 0, 0, CNTR_NORMAL,
4457 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4458[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4459 0, CNTR_NORMAL,
4460 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4461[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4462 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4463 CNTR_NORMAL,
4464 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4465[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4466 0, CNTR_NORMAL,
4467 access_rx_rbuf_block_list_read_cor_err_cnt),
4468[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4469 0, CNTR_NORMAL,
4470 access_rx_rbuf_block_list_read_unc_err_cnt),
4471[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4472 CNTR_NORMAL,
4473 access_rx_rbuf_lookup_des_cor_err_cnt),
4474[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4475 CNTR_NORMAL,
4476 access_rx_rbuf_lookup_des_unc_err_cnt),
4477[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4478 "RxRbufLookupDesRegUncCorErr", 0, 0,
4479 CNTR_NORMAL,
4480 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4481[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4482 CNTR_NORMAL,
4483 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4484[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4485 CNTR_NORMAL,
4486 access_rx_rbuf_free_list_cor_err_cnt),
4487[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4488 CNTR_NORMAL,
4489 access_rx_rbuf_free_list_unc_err_cnt),
4490[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4491 CNTR_NORMAL,
4492 access_rx_rcv_fsm_encoding_err_cnt),
4493[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4494 CNTR_NORMAL,
4495 access_rx_dma_flag_cor_err_cnt),
4496[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4497 CNTR_NORMAL,
4498 access_rx_dma_flag_unc_err_cnt),
4499[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4500 CNTR_NORMAL,
4501 access_rx_dc_sop_eop_parity_err_cnt),
4502[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4503 CNTR_NORMAL,
4504 access_rx_rcv_csr_parity_err_cnt),
4505[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4506 CNTR_NORMAL,
4507 access_rx_rcv_qp_map_table_cor_err_cnt),
4508[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4509 CNTR_NORMAL,
4510 access_rx_rcv_qp_map_table_unc_err_cnt),
4511[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4512 CNTR_NORMAL,
4513 access_rx_rcv_data_cor_err_cnt),
4514[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4515 CNTR_NORMAL,
4516 access_rx_rcv_data_unc_err_cnt),
4517[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4518 CNTR_NORMAL,
4519 access_rx_rcv_hdr_cor_err_cnt),
4520[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4521 CNTR_NORMAL,
4522 access_rx_rcv_hdr_unc_err_cnt),
4523[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4524 CNTR_NORMAL,
4525 access_rx_dc_intf_parity_err_cnt),
4526[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4527 CNTR_NORMAL,
4528 access_rx_dma_csr_cor_err_cnt),
4529/* SendPioErrStatus */
4530[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4531 CNTR_NORMAL,
4532 access_pio_pec_sop_head_parity_err_cnt),
4533[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4534 CNTR_NORMAL,
4535 access_pio_pcc_sop_head_parity_err_cnt),
4536[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4537 0, 0, CNTR_NORMAL,
4538 access_pio_last_returned_cnt_parity_err_cnt),
4539[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4540 0, CNTR_NORMAL,
4541 access_pio_current_free_cnt_parity_err_cnt),
4542[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4543 CNTR_NORMAL,
4544 access_pio_reserved_31_err_cnt),
4545[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4546 CNTR_NORMAL,
4547 access_pio_reserved_30_err_cnt),
4548[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4549 CNTR_NORMAL,
4550 access_pio_ppmc_sop_len_err_cnt),
4551[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4552 CNTR_NORMAL,
4553 access_pio_ppmc_bqc_mem_parity_err_cnt),
4554[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4555 CNTR_NORMAL,
4556 access_pio_vl_fifo_parity_err_cnt),
4557[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4558 CNTR_NORMAL,
4559 access_pio_vlf_sop_parity_err_cnt),
4560[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4561 CNTR_NORMAL,
4562 access_pio_vlf_v1_len_parity_err_cnt),
4563[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4564 CNTR_NORMAL,
4565 access_pio_block_qw_count_parity_err_cnt),
4566[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4567 CNTR_NORMAL,
4568 access_pio_write_qw_valid_parity_err_cnt),
4569[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4570 CNTR_NORMAL,
4571 access_pio_state_machine_err_cnt),
4572[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4573 CNTR_NORMAL,
4574 access_pio_write_data_parity_err_cnt),
4575[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4576 CNTR_NORMAL,
4577 access_pio_host_addr_mem_cor_err_cnt),
4578[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4579 CNTR_NORMAL,
4580 access_pio_host_addr_mem_unc_err_cnt),
4581[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4582 CNTR_NORMAL,
4583 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4584[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4585 CNTR_NORMAL,
4586 access_pio_init_sm_in_err_cnt),
4587[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4588 CNTR_NORMAL,
4589 access_pio_ppmc_pbl_fifo_err_cnt),
4590[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4591 0, CNTR_NORMAL,
4592 access_pio_credit_ret_fifo_parity_err_cnt),
4593[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4594 CNTR_NORMAL,
4595 access_pio_v1_len_mem_bank1_cor_err_cnt),
4596[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4597 CNTR_NORMAL,
4598 access_pio_v1_len_mem_bank0_cor_err_cnt),
4599[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4600 CNTR_NORMAL,
4601 access_pio_v1_len_mem_bank1_unc_err_cnt),
4602[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4603 CNTR_NORMAL,
4604 access_pio_v1_len_mem_bank0_unc_err_cnt),
4605[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4606 CNTR_NORMAL,
4607 access_pio_sm_pkt_reset_parity_err_cnt),
4608[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4609 CNTR_NORMAL,
4610 access_pio_pkt_evict_fifo_parity_err_cnt),
4611[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4612 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4613 CNTR_NORMAL,
4614 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4615[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4616 CNTR_NORMAL,
4617 access_pio_sbrdctl_crrel_parity_err_cnt),
4618[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4619 CNTR_NORMAL,
4620 access_pio_pec_fifo_parity_err_cnt),
4621[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4622 CNTR_NORMAL,
4623 access_pio_pcc_fifo_parity_err_cnt),
4624[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4625 CNTR_NORMAL,
4626 access_pio_sb_mem_fifo1_err_cnt),
4627[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4628 CNTR_NORMAL,
4629 access_pio_sb_mem_fifo0_err_cnt),
4630[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4631 CNTR_NORMAL,
4632 access_pio_csr_parity_err_cnt),
4633[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4634 CNTR_NORMAL,
4635 access_pio_write_addr_parity_err_cnt),
4636[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4637 CNTR_NORMAL,
4638 access_pio_write_bad_ctxt_err_cnt),
4639/* SendDmaErrStatus */
4640[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4641 0, CNTR_NORMAL,
4642 access_sdma_pcie_req_tracking_cor_err_cnt),
4643[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4644 0, CNTR_NORMAL,
4645 access_sdma_pcie_req_tracking_unc_err_cnt),
4646[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4647 CNTR_NORMAL,
4648 access_sdma_csr_parity_err_cnt),
4649[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4650 CNTR_NORMAL,
4651 access_sdma_rpy_tag_err_cnt),
4652/* SendEgressErrStatus */
4653[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4654 CNTR_NORMAL,
4655 access_tx_read_pio_memory_csr_unc_err_cnt),
4656[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4657 0, CNTR_NORMAL,
4658 access_tx_read_sdma_memory_csr_err_cnt),
4659[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4660 CNTR_NORMAL,
4661 access_tx_egress_fifo_cor_err_cnt),
4662[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4663 CNTR_NORMAL,
4664 access_tx_read_pio_memory_cor_err_cnt),
4665[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4666 CNTR_NORMAL,
4667 access_tx_read_sdma_memory_cor_err_cnt),
4668[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4669 CNTR_NORMAL,
4670 access_tx_sb_hdr_cor_err_cnt),
4671[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4672 CNTR_NORMAL,
4673 access_tx_credit_overrun_err_cnt),
4674[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4675 CNTR_NORMAL,
4676 access_tx_launch_fifo8_cor_err_cnt),
4677[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4678 CNTR_NORMAL,
4679 access_tx_launch_fifo7_cor_err_cnt),
4680[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4681 CNTR_NORMAL,
4682 access_tx_launch_fifo6_cor_err_cnt),
4683[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4684 CNTR_NORMAL,
4685 access_tx_launch_fifo5_cor_err_cnt),
4686[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4687 CNTR_NORMAL,
4688 access_tx_launch_fifo4_cor_err_cnt),
4689[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4690 CNTR_NORMAL,
4691 access_tx_launch_fifo3_cor_err_cnt),
4692[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4693 CNTR_NORMAL,
4694 access_tx_launch_fifo2_cor_err_cnt),
4695[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4696 CNTR_NORMAL,
4697 access_tx_launch_fifo1_cor_err_cnt),
4698[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4699 CNTR_NORMAL,
4700 access_tx_launch_fifo0_cor_err_cnt),
4701[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4702 CNTR_NORMAL,
4703 access_tx_credit_return_vl_err_cnt),
4704[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4705 CNTR_NORMAL,
4706 access_tx_hcrc_insertion_err_cnt),
4707[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4708 CNTR_NORMAL,
4709 access_tx_egress_fifo_unc_err_cnt),
4710[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4711 CNTR_NORMAL,
4712 access_tx_read_pio_memory_unc_err_cnt),
4713[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4714 CNTR_NORMAL,
4715 access_tx_read_sdma_memory_unc_err_cnt),
4716[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4717 CNTR_NORMAL,
4718 access_tx_sb_hdr_unc_err_cnt),
4719[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4720 CNTR_NORMAL,
4721 access_tx_credit_return_partiy_err_cnt),
4722[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4723 0, 0, CNTR_NORMAL,
4724 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4725[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4726 0, 0, CNTR_NORMAL,
4727 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4728[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4729 0, 0, CNTR_NORMAL,
4730 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4731[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4732 0, 0, CNTR_NORMAL,
4733 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4734[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4735 0, 0, CNTR_NORMAL,
4736 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4737[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4738 0, 0, CNTR_NORMAL,
4739 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4740[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4741 0, 0, CNTR_NORMAL,
4742 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4743[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4744 0, 0, CNTR_NORMAL,
4745 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4746[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4747 0, 0, CNTR_NORMAL,
4748 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4749[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4750 0, 0, CNTR_NORMAL,
4751 access_tx_sdma15_disallowed_packet_err_cnt),
4752[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4753 0, 0, CNTR_NORMAL,
4754 access_tx_sdma14_disallowed_packet_err_cnt),
4755[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4756 0, 0, CNTR_NORMAL,
4757 access_tx_sdma13_disallowed_packet_err_cnt),
4758[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4759 0, 0, CNTR_NORMAL,
4760 access_tx_sdma12_disallowed_packet_err_cnt),
4761[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4762 0, 0, CNTR_NORMAL,
4763 access_tx_sdma11_disallowed_packet_err_cnt),
4764[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4765 0, 0, CNTR_NORMAL,
4766 access_tx_sdma10_disallowed_packet_err_cnt),
4767[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4768 0, 0, CNTR_NORMAL,
4769 access_tx_sdma9_disallowed_packet_err_cnt),
4770[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4771 0, 0, CNTR_NORMAL,
4772 access_tx_sdma8_disallowed_packet_err_cnt),
4773[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4774 0, 0, CNTR_NORMAL,
4775 access_tx_sdma7_disallowed_packet_err_cnt),
4776[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4777 0, 0, CNTR_NORMAL,
4778 access_tx_sdma6_disallowed_packet_err_cnt),
4779[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4780 0, 0, CNTR_NORMAL,
4781 access_tx_sdma5_disallowed_packet_err_cnt),
4782[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4783 0, 0, CNTR_NORMAL,
4784 access_tx_sdma4_disallowed_packet_err_cnt),
4785[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4786 0, 0, CNTR_NORMAL,
4787 access_tx_sdma3_disallowed_packet_err_cnt),
4788[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4789 0, 0, CNTR_NORMAL,
4790 access_tx_sdma2_disallowed_packet_err_cnt),
4791[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4792 0, 0, CNTR_NORMAL,
4793 access_tx_sdma1_disallowed_packet_err_cnt),
4794[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4795 0, 0, CNTR_NORMAL,
4796 access_tx_sdma0_disallowed_packet_err_cnt),
4797[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4798 CNTR_NORMAL,
4799 access_tx_config_parity_err_cnt),
4800[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4801 CNTR_NORMAL,
4802 access_tx_sbrd_ctl_csr_parity_err_cnt),
4803[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4804 CNTR_NORMAL,
4805 access_tx_launch_csr_parity_err_cnt),
4806[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4807 CNTR_NORMAL,
4808 access_tx_illegal_vl_err_cnt),
4809[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4810 "TxSbrdCtlStateMachineParityErr", 0, 0,
4811 CNTR_NORMAL,
4812 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4813[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4814 CNTR_NORMAL,
4815 access_egress_reserved_10_err_cnt),
4816[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4817 CNTR_NORMAL,
4818 access_egress_reserved_9_err_cnt),
4819[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4820 0, 0, CNTR_NORMAL,
4821 access_tx_sdma_launch_intf_parity_err_cnt),
4822[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4823 CNTR_NORMAL,
4824 access_tx_pio_launch_intf_parity_err_cnt),
4825[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4826 CNTR_NORMAL,
4827 access_egress_reserved_6_err_cnt),
4828[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4829 CNTR_NORMAL,
4830 access_tx_incorrect_link_state_err_cnt),
4831[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4832 CNTR_NORMAL,
4833 access_tx_linkdown_err_cnt),
4834[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4835 "EgressFifoUnderrunOrParityErr", 0, 0,
4836 CNTR_NORMAL,
4837 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4838[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4839 CNTR_NORMAL,
4840 access_egress_reserved_2_err_cnt),
4841[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4842 CNTR_NORMAL,
4843 access_tx_pkt_integrity_mem_unc_err_cnt),
4844[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4845 CNTR_NORMAL,
4846 access_tx_pkt_integrity_mem_cor_err_cnt),
4847/* SendErrStatus */
4848[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4849 CNTR_NORMAL,
4850 access_send_csr_write_bad_addr_err_cnt),
4851[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4852 CNTR_NORMAL,
4853 access_send_csr_read_bad_addr_err_cnt),
4854[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4855 CNTR_NORMAL,
4856 access_send_csr_parity_cnt),
4857/* SendCtxtErrStatus */
4858[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4859 CNTR_NORMAL,
4860 access_pio_write_out_of_bounds_err_cnt),
4861[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4862 CNTR_NORMAL,
4863 access_pio_write_overflow_err_cnt),
4864[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4865 0, 0, CNTR_NORMAL,
4866 access_pio_write_crosses_boundary_err_cnt),
4867[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4868 CNTR_NORMAL,
4869 access_pio_disallowed_packet_err_cnt),
4870[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4871 CNTR_NORMAL,
4872 access_pio_inconsistent_sop_err_cnt),
4873/* SendDmaEngErrStatus */
4874[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4875 0, 0, CNTR_NORMAL,
4876 access_sdma_header_request_fifo_cor_err_cnt),
4877[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4878 CNTR_NORMAL,
4879 access_sdma_header_storage_cor_err_cnt),
4880[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4881 CNTR_NORMAL,
4882 access_sdma_packet_tracking_cor_err_cnt),
4883[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4884 CNTR_NORMAL,
4885 access_sdma_assembly_cor_err_cnt),
4886[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4887 CNTR_NORMAL,
4888 access_sdma_desc_table_cor_err_cnt),
4889[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4890 0, 0, CNTR_NORMAL,
4891 access_sdma_header_request_fifo_unc_err_cnt),
4892[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4893 CNTR_NORMAL,
4894 access_sdma_header_storage_unc_err_cnt),
4895[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4896 CNTR_NORMAL,
4897 access_sdma_packet_tracking_unc_err_cnt),
4898[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4899 CNTR_NORMAL,
4900 access_sdma_assembly_unc_err_cnt),
4901[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4902 CNTR_NORMAL,
4903 access_sdma_desc_table_unc_err_cnt),
4904[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4905 CNTR_NORMAL,
4906 access_sdma_timeout_err_cnt),
4907[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4908 CNTR_NORMAL,
4909 access_sdma_header_length_err_cnt),
4910[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4911 CNTR_NORMAL,
4912 access_sdma_header_address_err_cnt),
4913[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4914 CNTR_NORMAL,
4915 access_sdma_header_select_err_cnt),
4916[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4917 CNTR_NORMAL,
4918 access_sdma_reserved_9_err_cnt),
4919[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4920 CNTR_NORMAL,
4921 access_sdma_packet_desc_overflow_err_cnt),
4922[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4923 CNTR_NORMAL,
4924 access_sdma_length_mismatch_err_cnt),
4925[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4926 CNTR_NORMAL,
4927 access_sdma_halt_err_cnt),
4928[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4929 CNTR_NORMAL,
4930 access_sdma_mem_read_err_cnt),
4931[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4932 CNTR_NORMAL,
4933 access_sdma_first_desc_err_cnt),
4934[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4935 CNTR_NORMAL,
4936 access_sdma_tail_out_of_bounds_err_cnt),
4937[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4938 CNTR_NORMAL,
4939 access_sdma_too_long_err_cnt),
4940[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4941 CNTR_NORMAL,
4942 access_sdma_gen_mismatch_err_cnt),
4943[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4944 CNTR_NORMAL,
4945 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004946};
4947
4948static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4949[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4950 CNTR_NORMAL),
4951[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4952 CNTR_NORMAL),
4953[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4954 CNTR_NORMAL),
4955[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4956 CNTR_NORMAL),
4957[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4958 CNTR_NORMAL),
4959[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4960 CNTR_NORMAL),
4961[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4962 CNTR_NORMAL),
4963[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4964[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4965[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4966[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08004967 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004968[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08004969 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004970[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08004971 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004972[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4973[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4974[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08004975 access_sw_link_dn_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004976[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08004977 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05004978[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4979 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004980[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08004981 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004982[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08004983 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4984 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004985[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08004986 access_xmit_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004987[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08004988 access_rcv_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004989[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4990[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4991[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4992[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4993[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4994[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4995[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4996[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4997[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4998[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4999[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5000[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5001[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5002 access_sw_cpu_rc_acks),
5003[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005004 access_sw_cpu_rc_qacks),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005005[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005006 access_sw_cpu_rc_delayed_comp),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005007[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5008[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5009[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5010[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5011[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5012[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5013[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5014[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5015[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5016[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5017[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5018[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5019[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5020[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5021[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5022[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5023[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5024[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5025[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5026[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5027[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5028[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5029[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5030[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5031[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5032[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5033[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5034[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5035[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5036[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5037[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5038[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5039[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5040[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5041[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5042[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5043[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5044[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5045[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5046[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5047[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5048[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5049[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5050[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5051[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5052[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5053[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5054[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5055[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5056[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5057[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5058[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5059[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5060[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5061[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5062[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5063[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5064[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5065[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5066[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5067[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5068[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5069[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5070[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5071[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5072[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5073[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5074[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5075[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5076[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5077[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5078[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5079[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5080[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5081[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5082[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5083[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5084[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5085[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5086[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5087};
5088
5089/* ======================================================================== */
5090
Mike Marciniszyn77241052015-07-30 15:17:43 -04005091/* return true if this is chip revision a */
5092int is_ax(struct hfi1_devdata *dd)
5093{
5094 u8 chip_rev_minor =
5095 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5096 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5097 return (chip_rev_minor & 0xf0) == 0;
5098}
5099
5100/* return true if this is chip revision b */
5101int is_bx(struct hfi1_devdata *dd)
5102{
5103 u8 chip_rev_minor =
5104 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5105 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005106 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005107}
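/*
 * Decode sketch for the two helpers above (illustrative values only): the
 * comparison uses just the high nibble of the minor revision field, so
 *
 *	minor 0x00 or 0x01  ->  (minor & 0xf0) == 0x00  ->  is_ax() != 0
 *	minor 0x10 or 0x11  ->  (minor & 0xf0) == 0x10  ->  is_bx() != 0
 *
 * i.e. every A-step minor revision satisfies is_ax() and every B-step
 * minor revision satisfies is_bx(), regardless of the low nibble.
 */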
5108
5109/*
5110 * Append string s to buffer buf. Arguments curp and lenp are the current
5111 * position and remaining length, respectively.
5112 *
5113 * Return 0 on success, 1 if out of room.
5114 */
5115static int append_str(char *buf, char **curp, int *lenp, const char *s)
5116{
5117 char *p = *curp;
5118 int len = *lenp;
5119 int result = 0; /* success */
5120 char c;
5121
5122 /* add a comma if this is not the first string in the buffer */
5123 if (p != buf) {
5124 if (len == 0) {
5125 result = 1; /* out of room */
5126 goto done;
5127 }
5128 *p++ = ',';
5129 len--;
5130 }
5131
5132 /* copy the string */
5133 while ((c = *s++) != 0) {
5134 if (len == 0) {
5135 result = 1; /* out of room */
5136 goto done;
5137 }
5138 *p++ = c;
5139 len--;
5140 }
5141
5142done:
5143 /* write return values */
5144 *curp = p;
5145 *lenp = len;
5146
5147 return result;
5148}
5149
5150/*
5151 * Using the given flag table, print a comma separated string into
5152 * the buffer. End in '*' if the buffer is too short.
5153 */
5154static char *flag_string(char *buf, int buf_len, u64 flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005155 struct flag_table *table, int table_size)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005156{
5157 char extra[32];
5158 char *p = buf;
5159 int len = buf_len;
5160 int no_room = 0;
5161 int i;
5162
5163 /* make sure there are at least 2 bytes so we can form "*" */
5164 if (len < 2)
5165 return "";
5166
5167 len--; /* leave room for a nul */
5168 for (i = 0; i < table_size; i++) {
5169 if (flags & table[i].flag) {
5170 no_room = append_str(buf, &p, &len, table[i].str);
5171 if (no_room)
5172 break;
5173 flags &= ~table[i].flag;
5174 }
5175 }
5176
5177 /* any undocumented bits left? */
5178 if (!no_room && flags) {
5179 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5180 no_room = append_str(buf, &p, &len, extra);
5181 }
5182
5183 /* add '*' if we ran out of room */
5184 if (no_room) {
5185 /* may need to back up to add space for a '*' */
5186 if (len == 0)
5187 --p;
5188 *p++ = '*';
5189 }
5190
5191 /* add final nul - space already allocated above */
5192 *p = 0;
5193 return buf;
5194}
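/*
 * Usage sketch for flag_string()/append_str(): the table below is
 * hypothetical (the driver's real tables are defined elsewhere in this
 * file) and the struct flag_table layout is inferred only from the
 * .flag/.str accesses above.
 *
 *	static struct flag_table demo_flags[] = {
 *		{ .flag = 1ull << 0, .str = "BitZeroErr" },
 *		{ .flag = 1ull << 3, .str = "BitThreeErr" },
 *	};
 *	char buf[64];
 *
 *	flag_string(buf, sizeof(buf), 0x19, demo_flags,
 *		    ARRAY_SIZE(demo_flags));
 *
 * yields "BitZeroErr,BitThreeErr,bits 0x10": the two known bits are named
 * in table order and the leftover, undocumented bit 4 is reported as a
 * raw hex value.
 */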
5195
5196/* first 8 CCE error interrupt source names */
5197static const char * const cce_misc_names[] = {
5198 "CceErrInt", /* 0 */
5199 "RxeErrInt", /* 1 */
5200 "MiscErrInt", /* 2 */
5201 "Reserved3", /* 3 */
5202 "PioErrInt", /* 4 */
5203 "SDmaErrInt", /* 5 */
5204 "EgressErrInt", /* 6 */
5205 "TxeErrInt" /* 7 */
5206};
5207
5208/*
5209 * Return the miscellaneous error interrupt name.
5210 */
5211static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5212{
5213 if (source < ARRAY_SIZE(cce_misc_names))
5214 strncpy(buf, cce_misc_names[source], bsize);
5215 else
Jubin John17fb4f22016-02-14 20:21:52 -08005216 snprintf(buf, bsize, "Reserved%u",
5217 source + IS_GENERAL_ERR_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005218
5219 return buf;
5220}
5221
5222/*
5223 * Return the SDMA engine error interrupt name.
5224 */
5225static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5226{
5227 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5228 return buf;
5229}
5230
5231/*
5232 * Return the send context error interrupt name.
5233 */
5234static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5235{
5236 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5237 return buf;
5238}
5239
5240static const char * const various_names[] = {
5241 "PbcInt",
5242 "GpioAssertInt",
5243 "Qsfp1Int",
5244 "Qsfp2Int",
5245 "TCritInt"
5246};
5247
5248/*
5249 * Return the various interrupt name.
5250 */
5251static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5252{
5253 if (source < ARRAY_SIZE(various_names))
5254 strncpy(buf, various_names[source], bsize);
5255 else
Jubin John8638b772016-02-14 20:19:24 -08005256 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005257 return buf;
5258}
5259
5260/*
5261 * Return the DC interrupt name.
5262 */
5263static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5264{
5265 static const char * const dc_int_names[] = {
5266 "common",
5267 "lcb",
5268 "8051",
5269 "lbm" /* local block merge */
5270 };
5271
5272 if (source < ARRAY_SIZE(dc_int_names))
5273 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5274 else
5275 snprintf(buf, bsize, "DCInt%u", source);
5276 return buf;
5277}
5278
5279static const char * const sdma_int_names[] = {
5280 "SDmaInt",
5281 "SdmaIdleInt",
5282 "SdmaProgressInt",
5283};
5284
5285/*
5286 * Return the SDMA engine interrupt name.
5287 */
5288static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5289{
5290 /* what interrupt */
5291 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5292 /* which engine */
5293 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5294
5295 if (likely(what < 3))
5296 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5297 else
5298 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5299 return buf;
5300}
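/*
 * Worked example for the decode above, assuming TXE_NUM_SDMA_ENGINES is 16
 * (consistent with the "all 16 DMA engines" note later in this file):
 *
 *	source = 18  ->  what = 18 / 16 = 1, which = 18 % 16 = 2
 *	                 ->  "SdmaIdleInt2"
 *	source = 50  ->  what = 3, out of range
 *	                 ->  "Invalid SDMA interrupt 50"
 */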
5301
5302/*
5303 * Return the receive available interrupt name.
5304 */
5305static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5306{
5307 snprintf(buf, bsize, "RcvAvailInt%u", source);
5308 return buf;
5309}
5310
5311/*
5312 * Return the receive urgent interrupt name.
5313 */
5314static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5315{
5316 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5317 return buf;
5318}
5319
5320/*
5321 * Return the send credit interrupt name.
5322 */
5323static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5324{
5325 snprintf(buf, bsize, "SendCreditInt%u", source);
5326 return buf;
5327}
5328
5329/*
5330 * Return the reserved interrupt name.
5331 */
5332static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5333{
5334 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5335 return buf;
5336}
5337
5338static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5339{
5340 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005341 cce_err_status_flags,
5342 ARRAY_SIZE(cce_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005343}
5344
5345static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5346{
5347 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005348 rxe_err_status_flags,
5349 ARRAY_SIZE(rxe_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005350}
5351
5352static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5353{
5354 return flag_string(buf, buf_len, flags, misc_err_status_flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005355 ARRAY_SIZE(misc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005356}
5357
5358static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5359{
5360 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005361 pio_err_status_flags,
5362 ARRAY_SIZE(pio_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005363}
5364
5365static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5366{
5367 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005368 sdma_err_status_flags,
5369 ARRAY_SIZE(sdma_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005370}
5371
5372static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5373{
5374 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005375 egress_err_status_flags,
5376 ARRAY_SIZE(egress_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005377}
5378
5379static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5380{
5381 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005382 egress_err_info_flags,
5383 ARRAY_SIZE(egress_err_info_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005384}
5385
5386static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5387{
5388 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005389 send_err_status_flags,
5390 ARRAY_SIZE(send_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005391}
5392
5393static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5394{
5395 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005396 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005397
5398 /*
5399 * For most these errors, there is nothing that can be done except
5400 * report or record it.
5401 */
5402 dd_dev_info(dd, "CCE Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005403 cce_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005404
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005405 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5406 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005407 /* this error requires a manual drop into SPC freeze mode */
5408 /* then a fix up */
5409 start_freeze_handling(dd->pport, FREEZE_SELF);
5410 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005411
5412 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5413 if (reg & (1ull << i)) {
5414 incr_cntr64(&dd->cce_err_status_cnt[i]);
5415 /* maintain a counter over all cce_err_status errors */
5416 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5417 }
5418 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005419}
5420
5421/*
5422 * Check counters for receive errors that do not have an interrupt
5423 * associated with them.
5424 */
5425#define RCVERR_CHECK_TIME 10
5426static void update_rcverr_timer(unsigned long opaque)
5427{
5428 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5429 struct hfi1_pportdata *ppd = dd->pport;
5430 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5431
5432 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
Jubin John17fb4f22016-02-14 20:21:52 -08005433 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005434 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
Jubin John17fb4f22016-02-14 20:21:52 -08005435 set_link_down_reason(
5436 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5437 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005438 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5439 }
Jubin John50e5dcb2016-02-14 20:19:41 -08005440 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005441
5442 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5443}
5444
5445static int init_rcverr(struct hfi1_devdata *dd)
5446{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305447 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005448 /* Assume the hardware counter has been reset */
5449 dd->rcv_ovfl_cnt = 0;
5450 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5451}
5452
5453static void free_rcverr(struct hfi1_devdata *dd)
5454{
5455 if (dd->rcverr_timer.data)
5456 del_timer_sync(&dd->rcverr_timer);
5457 dd->rcverr_timer.data = 0;
5458}
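/*
 * Timing note for the three routines above: HZ is one second's worth of
 * jiffies, so
 *
 *	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
 *
 * rearms the overflow check RCVERR_CHECK_TIME (10) seconds out, and the
 * callback rearms itself the same way, giving a free-running 10 second
 * poll until free_rcverr() tears it down with del_timer_sync().
 */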
5459
5460static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5461{
5462 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005463 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005464
5465 dd_dev_info(dd, "Receive Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005466 rxe_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005467
5468 if (reg & ALL_RXE_FREEZE_ERR) {
5469 int flags = 0;
5470
5471 /*
5472 * Freeze mode recovery is disabled for the errors
5473 * in RXE_FREEZE_ABORT_MASK
5474 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005475 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005476 flags = FREEZE_ABORT;
5477
5478 start_freeze_handling(dd->pport, flags);
5479 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005480
5481 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5482 if (reg & (1ull << i))
5483 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5484 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005485}
5486
5487static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5488{
5489 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005490 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005491
5492 dd_dev_info(dd, "Misc Error: %s",
Jubin John17fb4f22016-02-14 20:21:52 -08005493 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005494 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5495 if (reg & (1ull << i))
5496 incr_cntr64(&dd->misc_err_status_cnt[i]);
5497 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005498}
5499
5500static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5501{
5502 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005503 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005504
5505 dd_dev_info(dd, "PIO Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005506 pio_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005507
5508 if (reg & ALL_PIO_FREEZE_ERR)
5509 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005510
5511 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5512 if (reg & (1ull << i))
5513 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5514 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005515}
5516
5517static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5518{
5519 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005520 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005521
5522 dd_dev_info(dd, "SDMA Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005523 sdma_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005524
5525 if (reg & ALL_SDMA_FREEZE_ERR)
5526 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005527
5528 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5529 if (reg & (1ull << i))
5530 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5531 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005532}
5533
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005534static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5535{
5536 incr_cntr64(&ppd->port_xmit_discards);
5537}
5538
Mike Marciniszyn77241052015-07-30 15:17:43 -04005539static void count_port_inactive(struct hfi1_devdata *dd)
5540{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005541 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005542}
5543
5544/*
5545 * We have had a "disallowed packet" error during egress. Determine the
5546 * integrity check which failed, and update relevant error counter, etc.
5547 *
5548 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5549 * bit of state per integrity check, and so we can miss the reason for an
5550 * egress error if more than one packet fails the same integrity check
5551 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5552 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005553static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5554 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005555{
5556 struct hfi1_pportdata *ppd = dd->pport;
5557 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5558 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5559 char buf[96];
5560
5561 /* clear down all observed info as quickly as possible after read */
5562 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5563
5564 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005565 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5566 info, egress_err_info_string(buf, sizeof(buf), info), src);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005567
5568 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005569 if (info & PORT_DISCARD_EGRESS_ERRS) {
5570 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005571
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005572 /*
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005573 * Count all applicable bits as individual errors and
5574 * attribute them to the packet that triggered this handler.
5575 * This may not be completely accurate due to limitations
5576 * on the available hardware error information. There is
5577 * a single information register and any number of error
5578 * packets may have occurred and contributed to it before
5579 * this routine is called. This means that:
5580 * a) If multiple packets with the same error occur before
5581 * this routine is called, earlier packets are missed.
5582 * There is only a single bit for each error type.
5583 * b) Errors may not be attributed to the correct VL.
5584 * The driver is attributing all bits in the info register
5585 * to the packet that triggered this call, but bits
5586 * could be an accumulation of different packets with
5587 * different VLs.
5588 * c) A single error packet may have multiple counts attached
5589 * to it. There is no way for the driver to know if
5590 * multiple bits set in the info register are due to a
5591 * single packet or multiple packets. The driver assumes
5592 * multiple packets.
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005593 */
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005594 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005595 for (i = 0; i < weight; i++) {
5596 __count_port_discards(ppd);
5597 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5598 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5599 else if (vl == 15)
5600 incr_cntr64(&ppd->port_xmit_discards_vl
5601 [C_VL_15]);
5602 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005603 }
5604}
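/*
 * Accounting sketch for the loop above (example bit values only): suppose
 *
 *	info & PORT_DISCARD_EGRESS_ERRS == 0x5	(two bits set)
 *
 * then weight = hweight64(0x5) = 2, so port_xmit_discards is incremented
 * twice and, for 0 <= vl < TXE_NUM_DATA_VL, port_xmit_discards_vl[vl] is
 * incremented twice as well, both attributed to the single packet that
 * triggered this handler, with the caveats listed in the comment above.
 */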
5605
5606/*
5607 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5608 * register. Does it represent a 'port inactive' error?
5609 */
5610static inline int port_inactive_err(u64 posn)
5611{
5612 return (posn >= SEES(TX_LINKDOWN) &&
5613 posn <= SEES(TX_INCORRECT_LINK_STATE));
5614}
5615
5616/*
5617 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5618 * register. Does it represent a 'disallowed packet' error?
5619 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005620static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005621{
5622 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5623 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5624}
5625
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005626/*
5627 * Input value is a bit position of one of the SDMA engine disallowed
5628 * packet errors. Return which engine. Use of this must be guarded by
5629 * disallowed_pkt_err().
5630 */
5631static inline int disallowed_pkt_engine(int posn)
5632{
5633 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5634}
5635
5636/*
5637 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5638 * be done.
5639 */
5640static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5641{
5642 struct sdma_vl_map *m;
5643 int vl;
5644
5645 /* range check */
5646 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5647 return -1;
5648
5649 rcu_read_lock();
5650 m = rcu_dereference(dd->sdma_map);
5651 vl = m->engine_to_vl[engine];
5652 rcu_read_unlock();
5653
5654 return vl;
5655}
5656
5657/*
5658 * Translate the send context (software index) into a VL. Return -1 if the
5659 * translation cannot be done.
5660 */
5661static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5662{
5663 struct send_context_info *sci;
5664 struct send_context *sc;
5665 int i;
5666
5667 sci = &dd->send_contexts[sw_index];
5668
5669 /* there is no information for user (PSM) and ack contexts */
Jianxin Xiong44306f12016-04-12 11:30:28 -07005670 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005671 return -1;
5672
5673 sc = sci->sc;
5674 if (!sc)
5675 return -1;
5676 if (dd->vld[15].sc == sc)
5677 return 15;
5678 for (i = 0; i < num_vls; i++)
5679 if (dd->vld[i].sc == sc)
5680 return i;
5681
5682 return -1;
5683}
5684
Mike Marciniszyn77241052015-07-30 15:17:43 -04005685static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5686{
5687 u64 reg_copy = reg, handled = 0;
5688 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005689 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005690
5691 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5692 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005693 else if (is_ax(dd) &&
5694 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5695 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005696 start_freeze_handling(dd->pport, 0);
5697
5698 while (reg_copy) {
5699 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005700 /* fls64() returns a 1-based offset, we want it zero based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005701 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005702 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005703
5704 if (port_inactive_err(shift)) {
5705 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005706 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005707 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005708 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5709
5710 handle_send_egress_err_info(dd, vl);
5711 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005712 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005713 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005714 }
5715
5716 reg &= ~handled;
5717
5718 if (reg)
5719 dd_dev_info(dd, "Egress Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005720 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005721
5722 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5723 if (reg & (1ull << i))
5724 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5725 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005726}
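/*
 * Bit-walk sketch for the loop above: fls64() returns the 1-based position
 * of the most significant set bit (0 when the value is 0), so errors are
 * consumed from the top down. For reg_copy == 0x8000000000000005:
 *
 *	pass 1: fls64() == 64 -> shift 63, handle and clear bit 63
 *	pass 2: fls64() ==  3 -> shift  2, handle and clear bit  2
 *	pass 3: fls64() ==  1 -> shift  0, handle and clear bit  0
 *	pass 4: reg_copy == 0, loop exits
 */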
5727
5728static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5729{
5730 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005731 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005732
5733 dd_dev_info(dd, "Send Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005734 send_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005735
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005736 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5737 if (reg & (1ull << i))
5738 incr_cntr64(&dd->send_err_status_cnt[i]);
5739 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005740}
5741
5742/*
5743 * The maximum number of times the error clear down will loop before
5744 * blocking a repeating error. This value is arbitrary.
5745 */
5746#define MAX_CLEAR_COUNT 20
5747
5748/*
5749 * Clear and handle an error register. All error interrupts are funneled
5750 * through here to have a central location to correctly handle single-
5751 * or multi-shot errors.
5752 *
5753 * For non per-context registers, call this routine with a context value
5754 * of 0 so the per-context offset is zero.
5755 *
5756 * If the handler loops too many times, assume that something is wrong
5757 * and can't be fixed, so mask the error bits.
5758 */
5759static void interrupt_clear_down(struct hfi1_devdata *dd,
5760 u32 context,
5761 const struct err_reg_info *eri)
5762{
5763 u64 reg;
5764 u32 count;
5765
5766 /* read in a loop until no more errors are seen */
5767 count = 0;
5768 while (1) {
5769 reg = read_kctxt_csr(dd, context, eri->status);
5770 if (reg == 0)
5771 break;
5772 write_kctxt_csr(dd, context, eri->clear, reg);
5773 if (likely(eri->handler))
5774 eri->handler(dd, context, reg);
5775 count++;
5776 if (count > MAX_CLEAR_COUNT) {
5777 u64 mask;
5778
5779 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005780 eri->desc, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005781 /*
5782 * Read-modify-write so any other masked bits
5783 * remain masked.
5784 */
5785 mask = read_kctxt_csr(dd, context, eri->mask);
5786 mask &= ~reg;
5787 write_kctxt_csr(dd, context, eri->mask, mask);
5788 break;
5789 }
5790 }
5791}
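/*
 * Wiring sketch for interrupt_clear_down() (hypothetical entry: the field
 * names are taken from the eri-> accesses above, but the register names
 * and description string are assumptions; the real err_reg_info tables
 * live elsewhere in this file):
 *
 *	static const struct err_reg_info demo_eri = {
 *		.status  = CCE_ERR_STATUS,
 *		.clear   = CCE_ERR_CLEAR,
 *		.mask    = CCE_ERR_MASK,
 *		.desc    = "CCEErr",
 *		.handler = handle_cce_err,
 *	};
 *
 *	interrupt_clear_down(dd, 0, &demo_eri);
 *
 * reads the status CSR in a loop, writes the observed bits back to the
 * clear CSR, calls the handler, and after MAX_CLEAR_COUNT iterations
 * masks any bit that keeps reasserting via a read-modify-write of the
 * mask CSR.
 */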
5792
5793/*
5794 * CCE block "misc" interrupt. Source is < 16.
5795 */
5796static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5797{
5798 const struct err_reg_info *eri = &misc_errs[source];
5799
5800 if (eri->handler) {
5801 interrupt_clear_down(dd, 0, eri);
5802 } else {
5803 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005804 source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005805 }
5806}
5807
5808static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5809{
5810 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005811 sc_err_status_flags,
5812 ARRAY_SIZE(sc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005813}
5814
5815/*
5816 * Send context error interrupt. Source (hw_context) is < 160.
5817 *
5818 * All send context errors cause the send context to halt. The normal
5819 * clear-down mechanism cannot be used because we cannot clear the
5820 * error bits until several other long-running items are done first.
5821 * This is OK because with the context halted, nothing else is going
5822 * to happen on it anyway.
5823 */
5824static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5825 unsigned int hw_context)
5826{
5827 struct send_context_info *sci;
5828 struct send_context *sc;
5829 char flags[96];
5830 u64 status;
5831 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005832 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005833
5834 sw_index = dd->hw_to_sw[hw_context];
5835 if (sw_index >= dd->num_send_contexts) {
5836 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005837 "out of range sw index %u for send context %u\n",
5838 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005839 return;
5840 }
5841 sci = &dd->send_contexts[sw_index];
5842 sc = sci->sc;
5843 if (!sc) {
5844 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -08005845 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005846 return;
5847 }
5848
5849 /* tell the software that a halt has begun */
5850 sc_stop(sc, SCF_HALTED);
5851
5852 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5853
5854 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
Jubin John17fb4f22016-02-14 20:21:52 -08005855 send_context_err_status_string(flags, sizeof(flags),
5856 status));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005857
5858 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005859 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005860
5861 /*
5862 * Automatically restart halted kernel contexts out of interrupt
5863 * context. User contexts must ask the driver to restart the context.
5864 */
5865 if (sc->type != SC_USER)
5866 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005867
5868 /*
5869 * Update the counters for the corresponding status bits.
5870 * Note that these particular counters are aggregated over all
5871 * 160 contexts.
5872 */
5873 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5874 if (status & (1ull << i))
5875 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5876 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005877}
5878
5879static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5880 unsigned int source, u64 status)
5881{
5882 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005883 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005884
5885 sde = &dd->per_sdma[source];
5886#ifdef CONFIG_SDMA_VERBOSITY
5887 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5888 slashstrip(__FILE__), __LINE__, __func__);
5889 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5890 sde->this_idx, source, (unsigned long long)status);
5891#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005892 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005893 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005894
5895 /*
5896 * Update the counters for the corresponding status bits.
5897 * Note that these particular counters are aggregated over
5898 * all 16 DMA engines.
5899 */
5900 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5901 if (status & (1ull << i))
5902 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5903 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005904}
5905
5906/*
5907 * CCE block SDMA error interrupt. Source is < 16.
5908 */
5909static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5910{
5911#ifdef CONFIG_SDMA_VERBOSITY
5912 struct sdma_engine *sde = &dd->per_sdma[source];
5913
5914 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5915 slashstrip(__FILE__), __LINE__, __func__);
5916 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5917 source);
5918 sdma_dumpstate(sde);
5919#endif
5920 interrupt_clear_down(dd, source, &sdma_eng_err);
5921}
5922
5923/*
5924 * CCE block "various" interrupt. Source is < 8.
5925 */
5926static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5927{
5928 const struct err_reg_info *eri = &various_err[source];
5929
5930 /*
5931 * TCritInt cannot go through interrupt_clear_down()
5932 * because it is not a second tier interrupt. The handler
5933 * should be called directly.
5934 */
5935 if (source == TCRIT_INT_SOURCE)
5936 handle_temp_err(dd);
5937 else if (eri->handler)
5938 interrupt_clear_down(dd, 0, eri);
5939 else
5940 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005941 "%s: Unimplemented/reserved interrupt %d\n",
5942 __func__, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005943}
5944
5945static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5946{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005947 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005948 struct hfi1_pportdata *ppd = dd->pport;
5949 unsigned long flags;
5950 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5951
5952 if (reg & QSFP_HFI0_MODPRST_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005953 if (!qsfp_mod_present(ppd)) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08005954 dd_dev_info(dd, "%s: QSFP module removed\n",
5955 __func__);
5956
Mike Marciniszyn77241052015-07-30 15:17:43 -04005957 ppd->driver_link_ready = 0;
5958 /*
5959 * Cable removed, reset all our information about the
5960 * cache and cable capabilities
5961 */
5962
5963 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5964 /*
5965 * We don't set cache_refresh_required here as we expect
5966 * an interrupt when a cable is inserted
5967 */
5968 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005969 ppd->qsfp_info.reset_needed = 0;
5970 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005971 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08005972 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005973 /* Invert the ModPresent pin now to detect plug-in */
5974 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5975 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005976
5977 if ((ppd->offline_disabled_reason >
5978 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08005979 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08005980 (ppd->offline_disabled_reason ==
5981 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5982 ppd->offline_disabled_reason =
5983 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08005984 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005985
Mike Marciniszyn77241052015-07-30 15:17:43 -04005986 if (ppd->host_link_state == HLS_DN_POLL) {
5987 /*
5988 * The link is still in POLL. This means
5989 * that the normal link down processing
5990 * will not happen. We have to do it here
5991 * before turning the DC off.
5992 */
5993 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5994 }
5995 } else {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08005996 dd_dev_info(dd, "%s: QSFP module inserted\n",
5997 __func__);
5998
Mike Marciniszyn77241052015-07-30 15:17:43 -04005999 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6000 ppd->qsfp_info.cache_valid = 0;
6001 ppd->qsfp_info.cache_refresh_required = 1;
6002 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006003 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006004
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006005 /*
6006 * Stop inversion of ModPresent pin to detect
6007 * removal of the cable
6008 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006009 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006010 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6011 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6012
6013 ppd->offline_disabled_reason =
6014 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006015 }
6016 }
6017
6018 if (reg & QSFP_HFI0_INT_N) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006019 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006020 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006021 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6022 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006023 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6024 }
6025
6026 /* Schedule the QSFP work only if there is a cable attached. */
6027 if (qsfp_mod_present(ppd))
6028 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6029}
6030
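/* Ask the 8051 firmware to give the host access to the LCB CSRs. */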
6031static int request_host_lcb_access(struct hfi1_devdata *dd)
6032{
6033 int ret;
6034
6035 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006036 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6037 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006038 if (ret != HCMD_SUCCESS) {
6039 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006040 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006041 }
6042 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6043}
6044
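/* Return LCB CSR access to the 8051 firmware. */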
6045static int request_8051_lcb_access(struct hfi1_devdata *dd)
6046{
6047 int ret;
6048
6049 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006050 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6051 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006052 if (ret != HCMD_SUCCESS) {
6053 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006054 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006055 }
6056 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6057}
6058
6059/*
6060 * Set the LCB selector - allow host access. The DCC selector always
6061 * points to the host.
6062 */
6063static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6064{
6065 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006066 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6067 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006068}
6069
6070/*
6071 * Clear the LCB selector - allow 8051 access. The DCC selector always
6072 * points to the host.
6073 */
6074static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6075{
6076 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006077 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006078}
6079
6080/*
6081 * Acquire LCB access from the 8051. If the host already has access,
6082 * just increment a counter. Otherwise, inform the 8051 that the
6083 * host is taking access.
6084 *
6085 * Returns:
6086 * 0 on success
6087 * -EBUSY if the 8051 has control and cannot be disturbed
6088 * -errno if unable to acquire access from the 8051
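 *
 * Illustrative usage (a sketch, not code taken from this driver): callers
 * pair this with release_lcb_access() around direct LCB CSR reads, e.g.:
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}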
6089 */
6090int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6091{
6092 struct hfi1_pportdata *ppd = dd->pport;
6093 int ret = 0;
6094
6095 /*
6096 * Use the host link state lock so the operation of this routine
6097 * { link state check, selector change, count increment } can occur
6098 * as a unit against a link state change. Otherwise there is a
6099 * race between the state change and the count increment.
6100 */
6101 if (sleep_ok) {
6102 mutex_lock(&ppd->hls_lock);
6103 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006104 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006105 udelay(1);
6106 }
6107
6108 /* this access is valid only when the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07006109 if (ppd->host_link_state & HLS_DOWN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006110 dd_dev_info(dd, "%s: link state %s not up\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006111 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006112 ret = -EBUSY;
6113 goto done;
6114 }
6115
6116 if (dd->lcb_access_count == 0) {
6117 ret = request_host_lcb_access(dd);
6118 if (ret) {
6119 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006120 "%s: unable to acquire LCB access, err %d\n",
6121 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006122 goto done;
6123 }
6124 set_host_lcb_access(dd);
6125 }
6126 dd->lcb_access_count++;
6127done:
6128 mutex_unlock(&ppd->hls_lock);
6129 return ret;
6130}
6131
6132/*
6133 * Release LCB access by decrementing the use count. If the count is moving
6134 * from 1 to 0, inform the 8051 that it has control back.
6135 *
6136 * Returns:
6137 * 0 on success
6138 * -errno if unable to release access to the 8051
6139 */
6140int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6141{
6142 int ret = 0;
6143
6144 /*
6145 * Use the host link state lock because the acquire needed it.
6146 * Here, we only need to keep { selector change, count decrement }
6147 * as a unit.
6148 */
6149 if (sleep_ok) {
6150 mutex_lock(&dd->pport->hls_lock);
6151 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006152 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006153 udelay(1);
6154 }
6155
6156 if (dd->lcb_access_count == 0) {
6157 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006158 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006159 goto done;
6160 }
6161
6162 if (dd->lcb_access_count == 1) {
6163 set_8051_lcb_access(dd);
6164 ret = request_8051_lcb_access(dd);
6165 if (ret) {
6166 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006167 "%s: unable to release LCB access, err %d\n",
6168 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006169 /* restore host access if the grant didn't work */
6170 set_host_lcb_access(dd);
6171 goto done;
6172 }
6173 }
6174 dd->lcb_access_count--;
6175done:
6176 mutex_unlock(&dd->pport->hls_lock);
6177 return ret;
6178}
6179
6180/*
6181 * Initialize LCB access variables and state. Called during driver load,
6182 * after most of the initialization is finished.
6183 *
6184 * The DC default is LCB access on for the host. The driver defaults to
6185 * leaving access to the 8051. Assign access now - this constrains the call
6186 * to this routine to be after all LCB set-up is done. In particular, after
6187 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6188 */
6189static void init_lcb_access(struct hfi1_devdata *dd)
6190{
6191 dd->lcb_access_count = 0;
6192}
6193
6194/*
6195 * Write a response back to an 8051 request.
6196 */
6197static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6198{
6199 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
Jubin John17fb4f22016-02-14 20:21:52 -08006200 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6201 (u64)return_code <<
6202 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6203 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006204}
6205
6206/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006207 * Handle host requests from the 8051.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006208 */
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006209static void handle_8051_request(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006210{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006211 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006212 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006213 u16 data = 0;
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006214 u8 type;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006215
6216 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6217 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6218 return; /* no request */
6219
6220 /* zero out COMPLETED so the response is seen */
6221 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6222
6223 /* extract request details */
6224 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6225 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6226 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6227 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6228
6229 switch (type) {
6230 case HREQ_LOAD_CONFIG:
6231 case HREQ_SAVE_CONFIG:
6232 case HREQ_READ_CONFIG:
6233 case HREQ_SET_TX_EQ_ABS:
6234 case HREQ_SET_TX_EQ_REL:
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006235 case HREQ_ENABLE:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006236 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006237 type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006238 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6239 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006240 case HREQ_CONFIG_DONE:
6241 hreq_response(dd, HREQ_SUCCESS, 0);
6242 break;
6243
6244 case HREQ_INTERFACE_TEST:
6245 hreq_response(dd, HREQ_SUCCESS, data);
6246 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006247 default:
6248 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6249 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6250 break;
6251 }
6252}
6253
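/*
 * Program SEND_CM_GLOBAL_CREDIT with the total credit limit, the shared
 * credit limit, and the allocation unit (vAU) to use.
 */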
6254static void write_global_credit(struct hfi1_devdata *dd,
6255 u8 vau, u16 total, u16 shared)
6256{
6257 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
Jubin John17fb4f22016-02-14 20:21:52 -08006258 ((u64)total <<
6259 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6260 ((u64)shared <<
6261 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6262 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006263}
6264
6265/*
6266 * Set up initial VL15 credits of the remote. Assumes the rest of
6267 * the CM credit registers are zero from a previous global or credit reset.
6268 */
6269void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6270{
6271 /* leave shared count at zero for both global and VL15 */
6272 write_global_credit(dd, vau, vl15buf, 0);
6273
6274 /* We may need some credits for another VL when sending packets
6275 * with the snoop interface. Dividing it down the middle for VL15
6276 * and VL0 should suffice.
6277 */
6278 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6279 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6280 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6281 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6282 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6283 } else {
6284 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6285 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6286 }
6287}
6288
6289/*
6290 * Zero all credit details from the previous connection and
6291 * reset the CM manager's internal counters.
6292 */
6293void reset_link_credits(struct hfi1_devdata *dd)
6294{
6295 int i;
6296
6297 /* remove all previous VL credit limits */
6298 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006299 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006300 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6301 write_global_credit(dd, 0, 0, 0);
6302 /* reset the CM block */
6303 pio_send_control(dd, PSC_CM_RESET);
6304}
6305
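/*
 * vCU and vAU are log2-encoded values (AU carries an extra factor of 8);
 * the helpers below convert between the encoded and actual sizes.
 */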
6306/* convert a vCU to a CU */
6307static u32 vcu_to_cu(u8 vcu)
6308{
6309 return 1 << vcu;
6310}
6311
6312/* convert a CU to a vCU */
6313static u8 cu_to_vcu(u32 cu)
6314{
6315 return ilog2(cu);
6316}
6317
6318/* convert a vAU to an AU */
6319static u32 vau_to_au(u8 vau)
6320{
6321 return 8 * (1 << vau);
6322}
6323
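/* Reset the SM trap QP and SA QP values to their defaults at link up. */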
6324static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6325{
6326 ppd->sm_trap_qp = 0x0;
6327 ppd->sa_qp = 0x1;
6328}
6329
6330/*
6331 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6332 */
6333static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6334{
6335 u64 reg;
6336
6337 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6338 write_csr(dd, DC_LCB_CFG_RUN, 0);
6339 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6340 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
Jubin John17fb4f22016-02-14 20:21:52 -08006341 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006342 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6343 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6344 reg = read_csr(dd, DCC_CFG_RESET);
Jubin John17fb4f22016-02-14 20:21:52 -08006345 write_csr(dd, DCC_CFG_RESET, reg |
6346 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6347 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006348 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006349 if (!abort) {
6350 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6351 write_csr(dd, DCC_CFG_RESET, reg);
6352 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6353 }
6354}
6355
6356/*
6357 * This routine should be called after the link has been transitioned to
6358 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6359 * reset).
6360 *
6361 * The expectation is that the caller of this routine would have taken
6362 * care of properly transitioning the link into the correct state.
6363 */
6364static void dc_shutdown(struct hfi1_devdata *dd)
6365{
6366 unsigned long flags;
6367
6368 spin_lock_irqsave(&dd->dc8051_lock, flags);
6369 if (dd->dc_shutdown) {
6370 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6371 return;
6372 }
6373 dd->dc_shutdown = 1;
6374 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6375 /* Shutdown the LCB */
6376 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006377 /*
6378 * Going to OFFLINE would have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006379 * SerDes into reset already. Just need to shut down the 8051
Jubin John4d114fd2016-02-14 20:21:43 -08006380 * itself.
6381 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006382 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6383}
6384
Jubin John4d114fd2016-02-14 20:21:43 -08006385/*
6386 * Calling this after the DC has been brought out of reset should not
6387 * do any damage.
6388 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006389static void dc_start(struct hfi1_devdata *dd)
6390{
6391 unsigned long flags;
6392 int ret;
6393
6394 spin_lock_irqsave(&dd->dc8051_lock, flags);
6395 if (!dd->dc_shutdown)
6396 goto done;
6397 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6398 /* Take the 8051 out of reset */
6399 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6400 /* Wait until 8051 is ready */
6401 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6402 if (ret) {
6403 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006404 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006405 }
6406 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6407 write_csr(dd, DCC_CFG_RESET, 0x10);
6408 /* lcb_shutdown() with abort=1 does not restore these */
6409 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6410 spin_lock_irqsave(&dd->dc8051_lock, flags);
6411 dd->dc_shutdown = 0;
6412done:
6413 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6414}
6415
6416/*
6417 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6418 */
6419static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6420{
6421 u64 rx_radr, tx_radr;
6422 u32 version;
6423
6424 if (dd->icode != ICODE_FPGA_EMULATION)
6425 return;
6426
6427 /*
6428 * These LCB defaults on emulator _s are good, nothing to do here:
6429 * LCB_CFG_TX_FIFOS_RADR
6430 * LCB_CFG_RX_FIFOS_RADR
6431 * LCB_CFG_LN_DCLK
6432 * LCB_CFG_IGNORE_LOST_RCLK
6433 */
6434 if (is_emulator_s(dd))
6435 return;
6436 /* else this is _p */
6437
6438 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006439 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006440 version = 0x2d; /* all B0 use 0x2d or higher settings */
6441
6442 if (version <= 0x12) {
6443 /* release 0x12 and below */
6444
6445 /*
6446 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6447 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6448 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6449 */
6450 rx_radr =
6451 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6452 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6453 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6454 /*
6455 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6456 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6457 */
6458 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6459 } else if (version <= 0x18) {
6460 /* release 0x13 up to 0x18 */
6461 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6462 rx_radr =
6463 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6464 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6465 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6466 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6467 } else if (version == 0x19) {
6468 /* release 0x19 */
6469 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6470 rx_radr =
6471 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6472 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6473 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6474 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6475 } else if (version == 0x1a) {
6476 /* release 0x1a */
6477 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6478 rx_radr =
6479 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6480 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6481 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6482 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6483 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6484 } else {
6485 /* release 0x1b and higher */
6486 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6487 rx_radr =
6488 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6489 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6490 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6491 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6492 }
6493
6494 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6495 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6496 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
Jubin John17fb4f22016-02-14 20:21:52 -08006497 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006498 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6499}
6500
6501/*
6502 * Handle an SMA idle message
6503 *
6504 * This is a work-queue function outside of the interrupt.
6505 */
6506void handle_sma_message(struct work_struct *work)
6507{
6508 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6509 sma_message_work);
6510 struct hfi1_devdata *dd = ppd->dd;
6511 u64 msg;
6512 int ret;
6513
Jubin John4d114fd2016-02-14 20:21:43 -08006514 /*
6515 * msg is bytes 1-4 of the 40-bit idle message - the command code
6516 * is stripped off
6517 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006518 ret = read_idle_sma(dd, &msg);
6519 if (ret)
6520 return;
6521 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6522 /*
6523 * React to the SMA message. Byte[1] (0 for us) is the command.
6524 */
6525 switch (msg & 0xff) {
6526 case SMA_IDLE_ARM:
6527 /*
6528 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6529 * State Transitions
6530 *
6531 * Only expected in INIT or ARMED, discard otherwise.
6532 */
6533 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6534 ppd->neighbor_normal = 1;
6535 break;
6536 case SMA_IDLE_ACTIVE:
6537 /*
6538 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6539 * State Transitions
6540 *
6541 * Can activate the node. Discard otherwise.
6542 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006543 if (ppd->host_link_state == HLS_UP_ARMED &&
6544 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006545 ppd->neighbor_normal = 1;
6546 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6547 if (ret)
6548 dd_dev_err(
6549 dd,
6550 "%s: received Active SMA idle message, couldn't set link to Active\n",
6551 __func__);
6552 }
6553 break;
6554 default:
6555 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006556 "%s: received unexpected SMA idle message 0x%llx\n",
6557 __func__, msg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006558 break;
6559 }
6560}
6561
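/*
 * Read-modify-write RCV_CTRL under the rcvctrl lock: set the bits in
 * "add", then clear the bits in "clear".
 */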
6562static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6563{
6564 u64 rcvctrl;
6565 unsigned long flags;
6566
6567 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6568 rcvctrl = read_csr(dd, RCV_CTRL);
6569 rcvctrl |= add;
6570 rcvctrl &= ~clear;
6571 write_csr(dd, RCV_CTRL, rcvctrl);
6572 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6573}
6574
6575static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6576{
6577 adjust_rcvctrl(dd, add, 0);
6578}
6579
6580static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6581{
6582 adjust_rcvctrl(dd, 0, clear);
6583}
6584
6585/*
6586 * Called from all interrupt handlers to start handling an SPC freeze.
6587 */
6588void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6589{
6590 struct hfi1_devdata *dd = ppd->dd;
6591 struct send_context *sc;
6592 int i;
6593
6594 if (flags & FREEZE_SELF)
6595 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6596
6597 /* enter frozen mode */
6598 dd->flags |= HFI1_FROZEN;
6599
6600 /* notify all SDMA engines that they are going into a freeze */
6601 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6602
6603 /* do halt pre-handling on all enabled send contexts */
6604 for (i = 0; i < dd->num_send_contexts; i++) {
6605 sc = dd->send_contexts[i].sc;
6606 if (sc && (sc->flags & SCF_ENABLED))
6607 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6608 }
6609
6610 /* Send contexts are frozen. Notify user space */
6611 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6612
6613 if (flags & FREEZE_ABORT) {
6614 dd_dev_err(dd,
6615 "Aborted freeze recovery. Please REBOOT system\n");
6616 return;
6617 }
6618 /* queue non-interrupt handler */
6619 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6620}
6621
6622/*
6623 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6624 * depending on the "freeze" parameter.
6625 *
6626 * No need to return an error if it times out, our only option
6627 * is to proceed anyway.
6628 */
6629static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6630{
6631 unsigned long timeout;
6632 u64 reg;
6633
6634 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6635 while (1) {
6636 reg = read_csr(dd, CCE_STATUS);
6637 if (freeze) {
6638 /* waiting until all indicators are set */
6639 if ((reg & ALL_FROZE) == ALL_FROZE)
6640 return; /* all done */
6641 } else {
6642 /* waiting until all indicators are clear */
6643 if ((reg & ALL_FROZE) == 0)
6644 return; /* all done */
6645 }
6646
6647 if (time_after(jiffies, timeout)) {
6648 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006649 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6650 freeze ? "" : "un", reg & ALL_FROZE,
6651 freeze ? ALL_FROZE : 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006652 return;
6653 }
6654 usleep_range(80, 120);
6655 }
6656}
6657
6658/*
6659 * Do all freeze handling for the RXE block.
6660 */
6661static void rxe_freeze(struct hfi1_devdata *dd)
6662{
6663 int i;
6664
6665 /* disable port */
6666 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6667
6668 /* disable all receive contexts */
6669 for (i = 0; i < dd->num_rcv_contexts; i++)
6670 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6671}
6672
6673/*
6674 * Unfreeze handling for the RXE block - kernel contexts only.
6675 * This will also enable the port. User contexts will do unfreeze
6676 * handling on a per-context basis as they call into the driver.
6677 *
6678 */
6679static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6680{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006681 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006682 int i;
6683
6684 /* enable all kernel contexts */
Mitko Haralanov566c1572016-02-03 14:32:49 -08006685 for (i = 0; i < dd->n_krcv_queues; i++) {
6686 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6687 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6688 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6689 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6690 hfi1_rcvctrl(dd, rcvmask, i);
6691 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006692
6693 /* enable port */
6694 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6695}
6696
6697/*
6698 * Non-interrupt SPC freeze handling.
6699 *
6700 * This is a work-queue function outside of the triggering interrupt.
6701 */
6702void handle_freeze(struct work_struct *work)
6703{
6704 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6705 freeze_work);
6706 struct hfi1_devdata *dd = ppd->dd;
6707
6708 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006709 wait_for_freeze_status(dd, 1);
6710
6711 /* SPC is now frozen */
6712
6713 /* do send PIO freeze steps */
6714 pio_freeze(dd);
6715
6716 /* do send DMA freeze steps */
6717 sdma_freeze(dd);
6718
6719 /* do send egress freeze steps - nothing to do */
6720
6721 /* do receive freeze steps */
6722 rxe_freeze(dd);
6723
6724 /*
6725 * Unfreeze the hardware - clear the freeze, wait for each
6726 * block's frozen bit to clear, then clear the frozen flag.
6727 */
6728 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6729 wait_for_freeze_status(dd, 0);
6730
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006731 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006732 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6733 wait_for_freeze_status(dd, 1);
6734 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6735 wait_for_freeze_status(dd, 0);
6736 }
6737
6738 /* do send PIO unfreeze steps for kernel contexts */
6739 pio_kernel_unfreeze(dd);
6740
6741 /* do send DMA unfreeze steps */
6742 sdma_unfreeze(dd);
6743
6744 /* do send egress unfreeze steps - nothing to do */
6745
6746 /* do receive unfreeze steps for kernel contexts */
6747 rxe_kernel_unfreeze(dd);
6748
6749 /*
6750 * The unfreeze procedure touches global device registers when
6751 * it disables and re-enables RXE. Mark the device unfrozen
6752 * after all that is done so other parts of the driver waiting
6753 * for the device to unfreeze don't do things out of order.
6754 *
6755 * The above implies that the meaning of HFI1_FROZEN flag is
6756 * "Device has gone into freeze mode and freeze mode handling
6757 * is still in progress."
6758 *
6759 * The flag will be removed when freeze mode processing has
6760 * completed.
6761 */
6762 dd->flags &= ~HFI1_FROZEN;
6763 wake_up(&dd->event_queue);
6764
6765 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006766}
6767
6768/*
6769 * Handle a link up interrupt from the 8051.
6770 *
6771 * This is a work-queue function outside of the interrupt.
6772 */
6773void handle_link_up(struct work_struct *work)
6774{
6775 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Jubin John17fb4f22016-02-14 20:21:52 -08006776 link_up_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006777 set_link_state(ppd, HLS_UP_INIT);
6778
6779 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6780 read_ltp_rtt(ppd->dd);
6781 /*
6782 * OPA specifies that certain counters are cleared on a transition
6783 * to link up, so do that.
6784 */
6785 clear_linkup_counters(ppd->dd);
6786 /*
6787 * And (re)set link up default values.
6788 */
6789 set_linkup_defaults(ppd);
6790
6791 /* enforce link speed enabled */
6792 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6793 /* oops - current speed is not enabled, bounce */
6794 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006795 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6796 ppd->link_speed_active, ppd->link_speed_enabled);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006797 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08006798 OPA_LINKDOWN_REASON_SPEED_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006799 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006800 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006801 start_link(ppd);
6802 }
6803}
6804
Jubin John4d114fd2016-02-14 20:21:43 -08006805/*
6806 * Several pieces of LNI information were cached for SMA in ppd.
6807 * Reset these on link down
6808 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006809static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6810{
6811 ppd->neighbor_guid = 0;
6812 ppd->neighbor_port_number = 0;
6813 ppd->neighbor_type = 0;
6814 ppd->neighbor_fm_security = 0;
6815}
6816
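/* Human-readable names for the OPA link down reason codes. */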
Dean Luickfeb831d2016-04-14 08:31:36 -07006817static const char * const link_down_reason_strs[] = {
6818 [OPA_LINKDOWN_REASON_NONE] = "None",
6819	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6820 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6821 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6822 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6823 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6824 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6825 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6826 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6827 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6828 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6829 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6830 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6831 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6832 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6833 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6834 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6835 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6836 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6837 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6838 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6839 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6840 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6841 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6842 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6843 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6844 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6845 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6846 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6847 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6848 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6849 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6850 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6851 "Excessive buffer overrun",
6852 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6853 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6854 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6855 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6856 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6857 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6858 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6859 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6860 "Local media not installed",
6861 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6862 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6863 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6864 "End to end not installed",
6865 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6866 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6867 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6868 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6869 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6870 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6871};
6872
6873/* return the neighbor link down reason string */
6874static const char *link_down_reason_str(u8 reason)
6875{
6876 const char *str = NULL;
6877
6878 if (reason < ARRAY_SIZE(link_down_reason_strs))
6879 str = link_down_reason_strs[reason];
6880 if (!str)
6881 str = "(invalid)";
6882
6883 return str;
6884}
6885
Mike Marciniszyn77241052015-07-30 15:17:43 -04006886/*
6887 * Handle a link down interrupt from the 8051.
6888 *
6889 * This is a work-queue function outside of the interrupt.
6890 */
6891void handle_link_down(struct work_struct *work)
6892{
6893 u8 lcl_reason, neigh_reason = 0;
Dean Luickfeb831d2016-04-14 08:31:36 -07006894 u8 link_down_reason;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006895 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Dean Luickfeb831d2016-04-14 08:31:36 -07006896 link_down_work);
6897 int was_up;
6898 static const char ldr_str[] = "Link down reason: ";
Mike Marciniszyn77241052015-07-30 15:17:43 -04006899
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006900 if ((ppd->host_link_state &
6901 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6902 ppd->port_type == PORT_TYPE_FIXED)
6903 ppd->offline_disabled_reason =
6904 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6905
6906 /* Go offline first, then deal with reading/writing through 8051 */
Dean Luickfeb831d2016-04-14 08:31:36 -07006907 was_up = !!(ppd->host_link_state & HLS_UP);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006908 set_link_state(ppd, HLS_DN_OFFLINE);
6909
Dean Luickfeb831d2016-04-14 08:31:36 -07006910 if (was_up) {
6911 lcl_reason = 0;
6912 /* link down reason is only valid if the link was up */
6913 read_link_down_reason(ppd->dd, &link_down_reason);
6914 switch (link_down_reason) {
6915 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6916 /* the link went down, no idle message reason */
6917 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6918 ldr_str);
6919 break;
6920 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6921 /*
6922 * The neighbor reason is only valid if an idle message
6923 * was received for it.
6924 */
6925 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6926 dd_dev_info(ppd->dd,
6927 "%sNeighbor link down message %d, %s\n",
6928 ldr_str, neigh_reason,
6929 link_down_reason_str(neigh_reason));
6930 break;
6931 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6932 dd_dev_info(ppd->dd,
6933 "%sHost requested link to go offline\n",
6934 ldr_str);
6935 break;
6936 default:
6937 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6938 ldr_str, link_down_reason);
6939 break;
6940 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006941
Dean Luickfeb831d2016-04-14 08:31:36 -07006942 /*
6943 * If no reason, assume peer-initiated but missed
6944 * LinkGoingDown idle flits.
6945 */
6946 if (neigh_reason == 0)
6947 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6948 } else {
6949 /* went down while polling or going up */
6950 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6951 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006952
6953 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6954
Dean Luick015e91f2016-04-14 08:31:42 -07006955 /* inform the SMA when the link transitions from up to down */
6956 if (was_up && ppd->local_link_down_reason.sma == 0 &&
6957 ppd->neigh_link_down_reason.sma == 0) {
6958 ppd->local_link_down_reason.sma =
6959 ppd->local_link_down_reason.latest;
6960 ppd->neigh_link_down_reason.sma =
6961 ppd->neigh_link_down_reason.latest;
6962 }
6963
Mike Marciniszyn77241052015-07-30 15:17:43 -04006964 reset_neighbor_info(ppd);
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07006965 if (ppd->mgmt_allowed)
6966 remove_full_mgmt_pkey(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006967
6968 /* disable the port */
6969 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6970
Jubin John4d114fd2016-02-14 20:21:43 -08006971 /*
6972 * If there is no cable attached, turn the DC off. Otherwise,
6973 * start the link bring up.
6974 */
Easwar Hariharan623bba22016-04-12 11:25:57 -07006975 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006976 dc_shutdown(ppd->dd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006977 } else {
6978 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006979 start_link(ppd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006980 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006981}
6982
6983void handle_link_bounce(struct work_struct *work)
6984{
6985 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6986 link_bounce_work);
6987
6988 /*
6989 * Only do something if the link is currently up.
6990 */
6991 if (ppd->host_link_state & HLS_UP) {
6992 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006993 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006994 start_link(ppd);
6995 } else {
6996 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006997 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006998 }
6999}
7000
7001/*
7002 * Mask conversion: Capability exchange to Port LTP. The capability
7003 * exchange has an implicit 16b CRC that is mandatory.
7004 */
7005static int cap_to_port_ltp(int cap)
7006{
7007 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7008
7009 if (cap & CAP_CRC_14B)
7010 port_ltp |= PORT_LTP_CRC_MODE_14;
7011 if (cap & CAP_CRC_48B)
7012 port_ltp |= PORT_LTP_CRC_MODE_48;
7013 if (cap & CAP_CRC_12B_16B_PER_LANE)
7014 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7015
7016 return port_ltp;
7017}
7018
7019/*
7020 * Convert an OPA Port LTP mask to capability mask
7021 */
7022int port_ltp_to_cap(int port_ltp)
7023{
7024 int cap_mask = 0;
7025
7026 if (port_ltp & PORT_LTP_CRC_MODE_14)
7027 cap_mask |= CAP_CRC_14B;
7028 if (port_ltp & PORT_LTP_CRC_MODE_48)
7029 cap_mask |= CAP_CRC_48B;
7030 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7031 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7032
7033 return cap_mask;
7034}
7035
7036/*
7037 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7038 */
7039static int lcb_to_port_ltp(int lcb_crc)
7040{
7041 int port_ltp = 0;
7042
7043 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7044 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7045 else if (lcb_crc == LCB_CRC_48B)
7046 port_ltp = PORT_LTP_CRC_MODE_48;
7047 else if (lcb_crc == LCB_CRC_14B)
7048 port_ltp = PORT_LTP_CRC_MODE_14;
7049 else
7050 port_ltp = PORT_LTP_CRC_MODE_16;
7051
7052 return port_ltp;
7053}
7054
7055/*
7056 * Our neighbor has indicated that we are allowed to act as a fabric
7057 * manager, so place the full management partition key in the second
7058 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7059 * that we should already have the limited management partition key in
7060 * array element 1, and also that the port is not yet up when
7061 * add_full_mgmt_pkey() is invoked.
7062 */
7063static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7064{
7065 struct hfi1_devdata *dd = ppd->dd;
7066
Dean Luick87645222015-12-01 15:38:21 -05007067	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7068 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7069 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7070 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007071 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7072 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7073}
7074
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007075static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7076{
7077 ppd->pkeys[2] = 0;
7078 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7079}
7080
Mike Marciniszyn77241052015-07-30 15:17:43 -04007081/*
7082 * Convert the given link width to the OPA link width bitmask.
7083 */
7084static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7085{
7086 switch (width) {
7087 case 0:
7088 /*
7089 * Simulator and quick linkup do not set the width.
7090 * Just set it to 4x without complaint.
7091 */
7092 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7093 return OPA_LINK_WIDTH_4X;
7094 return 0; /* no lanes up */
7095 case 1: return OPA_LINK_WIDTH_1X;
7096 case 2: return OPA_LINK_WIDTH_2X;
7097 case 3: return OPA_LINK_WIDTH_3X;
7098 default:
7099 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007100 __func__, width);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007101 /* fall through */
7102 case 4: return OPA_LINK_WIDTH_4X;
7103 }
7104}
7105
7106/*
7107 * Do a population count on the bottom nibble.
7108 */
7109static const u8 bit_counts[16] = {
7110 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7111};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007112
Mike Marciniszyn77241052015-07-30 15:17:43 -04007113static inline u8 nibble_to_count(u8 nibble)
7114{
7115 return bit_counts[nibble & 0xf];
7116}
7117
7118/*
7119 * Read the active lane information from the 8051 registers and return
7120 * their widths.
7121 *
7122 * Active lane information is found in these 8051 registers:
7123 * enable_lane_tx
7124 * enable_lane_rx
7125 */
7126static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7127 u16 *rx_width)
7128{
7129 u16 tx, rx;
7130 u8 enable_lane_rx;
7131 u8 enable_lane_tx;
7132 u8 tx_polarity_inversion;
7133 u8 rx_polarity_inversion;
7134 u8 max_rate;
7135
7136 /* read the active lanes */
7137 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08007138 &rx_polarity_inversion, &max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007139 read_local_lni(dd, &enable_lane_rx);
7140
7141 /* convert to counts */
7142 tx = nibble_to_count(enable_lane_tx);
7143 rx = nibble_to_count(enable_lane_rx);
7144
7145 /*
7146 * Set link_speed_active here, overriding what was set in
7147 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7148 * set the max_rate field in handle_verify_cap until v0.19.
7149 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007150 if ((dd->icode == ICODE_RTL_SILICON) &&
7151 (dd->dc8051_ver < dc8051_ver(0, 19))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007152 /* max_rate: 0 = 12.5G, 1 = 25G */
7153 switch (max_rate) {
7154 case 0:
7155 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7156 break;
7157 default:
7158 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007159 "%s: unexpected max rate %d, using 25Gb\n",
7160 __func__, (int)max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007161 /* fall through */
7162 case 1:
7163 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7164 break;
7165 }
7166 }
7167
7168 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007169 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7170 enable_lane_tx, tx, enable_lane_rx, rx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007171 *tx_width = link_width_to_bits(dd, tx);
7172 *rx_width = link_width_to_bits(dd, rx);
7173}
7174
7175/*
7176 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7177 * Valid after the end of VerifyCap and during LinkUp. Does not change
7178 * after link up. I.e. look elsewhere for downgrade information.
7179 *
7180 * Bits are:
7181 * + bits [7:4] contain the number of active transmitters
7182 * + bits [3:0] contain the number of active receivers
7183 * These are numbers 1 through 4 and can be different values if the
7184 * link is asymmetric.
7185 *
7186 * verify_cap_local_fm_link_width[0] retains its original value.
7187 */
7188static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7189 u16 *rx_width)
7190{
7191 u16 widths, tx, rx;
7192 u8 misc_bits, local_flags;
7193 u16 active_tx, active_rx;
7194
7195 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7196 tx = widths >> 12;
7197 rx = (widths >> 8) & 0xf;
7198
7199 *tx_width = link_width_to_bits(dd, tx);
7200 *rx_width = link_width_to_bits(dd, rx);
7201
7202 /* print the active widths */
7203 get_link_widths(dd, &active_tx, &active_rx);
7204}
7205
7206/*
7207 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7208 * hardware information when the link first comes up.
7209 *
7210 * The link width is not available until after VerifyCap.AllFramesReceived
7211 * (the trigger for handle_verify_cap), so this is outside that routine
7212 * and should be called when the 8051 signals linkup.
7213 */
7214void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7215{
7216 u16 tx_width, rx_width;
7217
7218 /* get end-of-LNI link widths */
7219 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7220
7221 /* use tx_width as the link is supposed to be symmetric on link up */
7222 ppd->link_width_active = tx_width;
7223 /* link width downgrade active (LWD.A) starts out matching LW.A */
7224 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7225 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7226 /* per OPA spec, on link up LWD.E resets to LWD.S */
7227 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7228	/* cache the active egress rate (units [10^6 bits/sec]) */
7229 ppd->current_egress_rate = active_egress_rate(ppd);
7230}
7231
7232/*
7233 * Handle a verify capabilities interrupt from the 8051.
7234 *
7235 * This is a work-queue function outside of the interrupt.
7236 */
7237void handle_verify_cap(struct work_struct *work)
7238{
7239 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7240 link_vc_work);
7241 struct hfi1_devdata *dd = ppd->dd;
7242 u64 reg;
7243 u8 power_management;
7244	u8 continuous;
7245 u8 vcu;
7246 u8 vau;
7247 u8 z;
7248 u16 vl15buf;
7249 u16 link_widths;
7250 u16 crc_mask;
7251 u16 crc_val;
7252 u16 device_id;
7253 u16 active_tx, active_rx;
7254 u8 partner_supported_crc;
7255 u8 remote_tx_rate;
7256 u8 device_rev;
7257
7258 set_link_state(ppd, HLS_VERIFY_CAP);
7259
7260 lcb_shutdown(dd, 0);
7261 adjust_lcb_for_fpga_serdes(dd);
7262
7263 /*
7264 * These are now valid:
7265 * remote VerifyCap fields in the general LNI config
7266 * CSR DC8051_STS_REMOTE_GUID
7267 * CSR DC8051_STS_REMOTE_NODE_TYPE
7268 * CSR DC8051_STS_REMOTE_FM_SECURITY
7269 * CSR DC8051_STS_REMOTE_PORT_NO
7270 */
7271
7272	read_vc_remote_phy(dd, &power_management, &continuous);
Jubin John17fb4f22016-02-14 20:21:52 -08007273 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7274 &partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007275 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7276 read_remote_device_id(dd, &device_id, &device_rev);
7277 /*
7278 * And the 'MgmtAllowed' information, which is exchanged during
7279 * LNI, is also available at this point.
7280 */
7281 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7282 /* print the active widths */
7283 get_link_widths(dd, &active_tx, &active_rx);
7284 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007285 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7286		    (int)power_management, (int)continuous);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007287 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007288 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7289 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7290 (int)partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007291 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007292 (u32)remote_tx_rate, (u32)link_widths);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007293 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007294 (u32)device_id, (u32)device_rev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007295 /*
7296 * The peer vAU value just read is the peer receiver value. HFI does
7297 * not support a transmit vAU of 0 (AU == 8). We advertised that
7298 * with Z=1 in the fabric capabilities sent to the peer. The peer
7299 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7300 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7301 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7302 * subject to the Z value exception.
7303 */
7304 if (vau == 0)
7305 vau = 1;
7306 set_up_vl15(dd, vau, vl15buf);
7307
7308 /* set up the LCB CRC mode */
7309 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7310
7311 /* order is important: use the lowest bit in common */
7312 if (crc_mask & CAP_CRC_14B)
7313 crc_val = LCB_CRC_14B;
7314 else if (crc_mask & CAP_CRC_48B)
7315 crc_val = LCB_CRC_48B;
7316 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7317 crc_val = LCB_CRC_12B_16B_PER_LANE;
7318 else
7319 crc_val = LCB_CRC_16B;
7320
7321 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7322 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7323 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7324
7325 /* set (14b only) or clear sideband credit */
7326 reg = read_csr(dd, SEND_CM_CTRL);
7327 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7328 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007329 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007330 } else {
7331 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007332 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007333 }
7334
7335 ppd->link_speed_active = 0; /* invalid value */
7336 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7337 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7338 switch (remote_tx_rate) {
7339 case 0:
7340 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7341 break;
7342 case 1:
7343 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7344 break;
7345 }
7346 } else {
7347 /* actual rate is highest bit of the ANDed rates */
7348 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7349
7350 if (rate & 2)
7351 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7352 else if (rate & 1)
7353 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7354 }
7355 if (ppd->link_speed_active == 0) {
7356 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007357 __func__, (int)remote_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007358 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7359 }
7360
7361 /*
7362 * Cache the values of the supported, enabled, and active
7363 * LTP CRC modes to return in 'portinfo' queries. But the bit
7364 * flags that are returned in the portinfo query differ from
7365 * what's in the link_crc_mask, crc_sizes, and crc_val
7366 * variables. Convert these here.
7367 */
7368 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7369 /* supported crc modes */
7370 ppd->port_ltp_crc_mode |=
7371 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7372 /* enabled crc modes */
7373 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7374 /* active crc mode */
7375
7376 /* set up the remote credit return table */
7377 assign_remote_cm_au_table(dd, vcu);
7378
7379 /*
7380 * The LCB is reset on entry to handle_verify_cap(), so this must
7381 * be applied on every link up.
7382 *
7383 * Adjust LCB error kill enable to kill the link if
7384 * these RBUF errors are seen:
7385 * REPLAY_BUF_MBE_SMASK
7386 * FLIT_INPUT_BUF_MBE_SMASK
7387 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007388 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007389 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7390 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7391 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7392 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7393 }
7394
7395 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7396 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7397
7398 /* give 8051 access to the LCB CSRs */
7399 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7400 set_8051_lcb_access(dd);
7401
7402 ppd->neighbor_guid =
7403 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7404 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7405 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7406 ppd->neighbor_type =
7407 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7408 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7409 ppd->neighbor_fm_security =
7410 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7411 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7412 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007413 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7414 ppd->neighbor_guid, ppd->neighbor_type,
7415 ppd->mgmt_allowed, ppd->neighbor_fm_security);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007416 if (ppd->mgmt_allowed)
7417 add_full_mgmt_pkey(ppd);
7418
7419 /* tell the 8051 to go to LinkUp */
7420 set_link_state(ppd, HLS_GOING_UP);
7421}
7422
7423/*
7424 * Apply the link width downgrade enabled policy against the current active
7425 * link widths.
7426 *
7427 * Called when the enabled policy changes or the active link widths change.
7428 */
7429void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7430{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007431 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007432 int tries;
7433 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007434 u16 tx, rx;
7435
Dean Luick323fd782015-11-16 21:59:24 -05007436 /* use the hls lock to avoid a race with actual link up */
7437 tries = 0;
7438retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007439 mutex_lock(&ppd->hls_lock);
7440 /* only apply if the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07007441 if (ppd->host_link_state & HLS_DOWN) {
Dean Luick323fd782015-11-16 21:59:24 -05007442 /* still going up..wait and retry */
7443 if (ppd->host_link_state & HLS_GOING_UP) {
7444 if (++tries < 1000) {
7445 mutex_unlock(&ppd->hls_lock);
7446 usleep_range(100, 120); /* arbitrary */
7447 goto retry;
7448 }
7449 dd_dev_err(ppd->dd,
7450 "%s: giving up waiting for link state change\n",
7451 __func__);
7452 }
7453 goto done;
7454 }
7455
7456 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007457
7458 if (refresh_widths) {
7459 get_link_widths(ppd->dd, &tx, &rx);
7460 ppd->link_width_downgrade_tx_active = tx;
7461 ppd->link_width_downgrade_rx_active = rx;
7462 }
7463
Dean Luickf9b56352016-04-14 08:31:30 -07007464 if (ppd->link_width_downgrade_tx_active == 0 ||
7465 ppd->link_width_downgrade_rx_active == 0) {
7466 /* the 8051 reported a dead link as a downgrade */
7467 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7468 } else if (lwde == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007469 /* downgrade is disabled */
7470
7471 /* bounce if not at starting active width */
7472 if ((ppd->link_width_active !=
Jubin John17fb4f22016-02-14 20:21:52 -08007473 ppd->link_width_downgrade_tx_active) ||
7474 (ppd->link_width_active !=
7475 ppd->link_width_downgrade_rx_active)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007476 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007477 "Link downgrade is disabled and link has downgraded, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007478 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007479 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7480 ppd->link_width_active,
7481 ppd->link_width_downgrade_tx_active,
7482 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007483 do_bounce = 1;
7484 }
Jubin Johnd0d236e2016-02-14 20:20:15 -08007485 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7486 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007487 /* Tx or Rx is outside the enabled policy */
7488 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007489 "Link is outside of downgrade allowed, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007490 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007491 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7492 lwde, ppd->link_width_downgrade_tx_active,
7493 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007494 do_bounce = 1;
7495 }
7496
Dean Luick323fd782015-11-16 21:59:24 -05007497done:
7498 mutex_unlock(&ppd->hls_lock);
7499
Mike Marciniszyn77241052015-07-30 15:17:43 -04007500 if (do_bounce) {
7501 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08007502 OPA_LINKDOWN_REASON_WIDTH_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007503 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007504 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007505 start_link(ppd);
7506 }
7507}
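/*
 * Illustrative sketch of the policy check above, not driver logic: the
 * enabled mask and the active masks are AND-ed directly, so they are
 * assumed to share the OPA_LINK_WIDTH_* bit encoding.  With downgrade
 * enabled only for 4X and a link that has come back at 2X:
 *
 *	lwde      = OPA_LINK_WIDTH_4X;
 *	tx_active = OPA_LINK_WIDTH_2X;
 *	(lwde & tx_active) == 0;	// -> do_bounce = 1
 *
 * and the link is taken offline with OPA_LINKDOWN_REASON_WIDTH_POLICY,
 * then restarted via tune_serdes()/start_link().
 */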
7508
7509/*
7510 * Handle a link downgrade interrupt from the 8051.
7511 *
7512 * This is a work-queue function outside of the interrupt.
7513 */
7514void handle_link_downgrade(struct work_struct *work)
7515{
7516 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7517 link_downgrade_work);
7518
7519 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7520 apply_link_downgrade_policy(ppd, 1);
7521}
7522
7523static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7524{
7525 return flag_string(buf, buf_len, flags, dcc_err_flags,
7526 ARRAY_SIZE(dcc_err_flags));
7527}
7528
7529static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7530{
7531 return flag_string(buf, buf_len, flags, lcb_err_flags,
7532 ARRAY_SIZE(lcb_err_flags));
7533}
7534
7535static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7536{
7537 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7538 ARRAY_SIZE(dc8051_err_flags));
7539}
7540
7541static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7542{
7543 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7544 ARRAY_SIZE(dc8051_info_err_flags));
7545}
7546
7547static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7548{
7549 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7550 ARRAY_SIZE(dc8051_info_host_msg_flags));
7551}
7552
7553static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7554{
7555 struct hfi1_pportdata *ppd = dd->pport;
7556 u64 info, err, host_msg;
7557 int queue_link_down = 0;
7558 char buf[96];
7559
7560 /* look at the flags */
7561 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7562 /* 8051 information set by firmware */
7563 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7564 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7565 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7566 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7567 host_msg = (info >>
7568 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7569 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7570
7571 /*
7572 * Handle error flags.
7573 */
7574 if (err & FAILED_LNI) {
7575 /*
7576 * LNI error indications are cleared by the 8051
7577 * only when starting polling. Only pay attention
7578 * to them when in the states that occur during
7579 * LNI.
7580 */
7581 if (ppd->host_link_state
7582 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7583 queue_link_down = 1;
7584 dd_dev_info(dd, "Link error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007585 dc8051_info_err_string(buf,
7586 sizeof(buf),
7587 err &
7588 FAILED_LNI));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007589 }
7590 err &= ~(u64)FAILED_LNI;
7591 }
Dean Luick6d014532015-12-01 15:38:23 -05007592		/* unknown frames can happen during LNI, just count */
7593 if (err & UNKNOWN_FRAME) {
7594 ppd->unknown_frame_count++;
7595 err &= ~(u64)UNKNOWN_FRAME;
7596 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007597 if (err) {
7598 /* report remaining errors, but do not do anything */
7599 dd_dev_err(dd, "8051 info error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007600 dc8051_info_err_string(buf, sizeof(buf),
7601 err));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007602 }
7603
7604 /*
7605 * Handle host message flags.
7606 */
7607 if (host_msg & HOST_REQ_DONE) {
7608 /*
7609 * Presently, the driver does a busy wait for
7610 * host requests to complete. This is only an
7611 * informational message.
7612 * NOTE: The 8051 clears the host message
7613 * information *on the next 8051 command*.
7614 * Therefore, when linkup is achieved,
7615 * this flag will still be set.
7616 */
7617 host_msg &= ~(u64)HOST_REQ_DONE;
7618 }
7619 if (host_msg & BC_SMA_MSG) {
7620 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7621 host_msg &= ~(u64)BC_SMA_MSG;
7622 }
7623 if (host_msg & LINKUP_ACHIEVED) {
7624 dd_dev_info(dd, "8051: Link up\n");
7625 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7626 host_msg &= ~(u64)LINKUP_ACHIEVED;
7627 }
7628 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07007629 handle_8051_request(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007630 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7631 }
7632 if (host_msg & VERIFY_CAP_FRAME) {
7633 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7634 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7635 }
7636 if (host_msg & LINK_GOING_DOWN) {
7637 const char *extra = "";
7638 /* no downgrade action needed if going down */
7639 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7640 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7641 extra = " (ignoring downgrade)";
7642 }
7643 dd_dev_info(dd, "8051: Link down%s\n", extra);
7644 queue_link_down = 1;
7645 host_msg &= ~(u64)LINK_GOING_DOWN;
7646 }
7647 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7648 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7649 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7650 }
7651 if (host_msg) {
7652 /* report remaining messages, but do not do anything */
7653 dd_dev_info(dd, "8051 info host message: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007654 dc8051_info_host_msg_string(buf,
7655 sizeof(buf),
7656 host_msg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007657 }
7658
7659 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7660 }
7661 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7662 /*
7663 * Lost the 8051 heartbeat. If this happens, we
7664 * receive constant interrupts about it. Disable
7665 * the interrupt after the first.
7666 */
7667 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7668 write_csr(dd, DC_DC8051_ERR_EN,
Jubin John17fb4f22016-02-14 20:21:52 -08007669 read_csr(dd, DC_DC8051_ERR_EN) &
7670 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007671
7672 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7673 }
7674 if (reg) {
7675 /* report the error, but do not do anything */
7676 dd_dev_err(dd, "8051 error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007677 dc8051_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007678 }
7679
7680 if (queue_link_down) {
Jubin John4d114fd2016-02-14 20:21:43 -08007681 /*
7682 * if the link is already going down or disabled, do not
7683 * queue another
7684 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007685 if ((ppd->host_link_state &
7686 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7687 ppd->link_enabled == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007688 dd_dev_info(dd, "%s: not queuing link down\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007689 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007690 } else {
7691 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7692 }
7693 }
7694}
7695
7696static const char * const fm_config_txt[] = {
7697[0] =
7698 "BadHeadDist: Distance violation between two head flits",
7699[1] =
7700 "BadTailDist: Distance violation between two tail flits",
7701[2] =
7702 "BadCtrlDist: Distance violation between two credit control flits",
7703[3] =
7704 "BadCrdAck: Credits return for unsupported VL",
7705[4] =
7706 "UnsupportedVLMarker: Received VL Marker",
7707[5] =
7708 "BadPreempt: Exceeded the preemption nesting level",
7709[6] =
7710 "BadControlFlit: Received unsupported control flit",
7711/* no 7 */
7712[8] =
7713 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7714};
7715
7716static const char * const port_rcv_txt[] = {
7717[1] =
7718 "BadPktLen: Illegal PktLen",
7719[2] =
7720 "PktLenTooLong: Packet longer than PktLen",
7721[3] =
7722 "PktLenTooShort: Packet shorter than PktLen",
7723[4] =
7724 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7725[5] =
7726 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7727[6] =
7728 "BadL2: Illegal L2 opcode",
7729[7] =
7730 "BadSC: Unsupported SC",
7731[9] =
7732 "BadRC: Illegal RC",
7733[11] =
7734 "PreemptError: Preempting with same VL",
7735[12] =
7736 "PreemptVL15: Preempting a VL15 packet",
7737};
7738
7739#define OPA_LDR_FMCONFIG_OFFSET 16
7740#define OPA_LDR_PORTRCV_OFFSET 0
7741static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7742{
7743 u64 info, hdr0, hdr1;
7744 const char *extra;
7745 char buf[96];
7746 struct hfi1_pportdata *ppd = dd->pport;
7747 u8 lcl_reason = 0;
7748 int do_bounce = 0;
7749
7750 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7751 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7752 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7753 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7754 /* set status bit */
7755 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7756 }
7757 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7758 }
7759
7760 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7761 struct hfi1_pportdata *ppd = dd->pport;
7762 /* this counter saturates at (2^32) - 1 */
7763 if (ppd->link_downed < (u32)UINT_MAX)
7764 ppd->link_downed++;
7765 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7766 }
7767
7768 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7769 u8 reason_valid = 1;
7770
7771 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7772 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7773 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7774 /* set status bit */
7775 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7776 }
7777 switch (info) {
7778 case 0:
7779 case 1:
7780 case 2:
7781 case 3:
7782 case 4:
7783 case 5:
7784 case 6:
7785 extra = fm_config_txt[info];
7786 break;
7787 case 8:
7788 extra = fm_config_txt[info];
7789 if (ppd->port_error_action &
7790 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7791 do_bounce = 1;
7792 /*
7793 * lcl_reason cannot be derived from info
7794 * for this error
7795 */
7796 lcl_reason =
7797 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7798 }
7799 break;
7800 default:
7801 reason_valid = 0;
7802 snprintf(buf, sizeof(buf), "reserved%lld", info);
7803 extra = buf;
7804 break;
7805 }
7806
7807 if (reason_valid && !do_bounce) {
7808 do_bounce = ppd->port_error_action &
7809 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7810 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7811 }
7812
7813 /* just report this */
7814 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7815 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7816 }
7817
7818 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7819 u8 reason_valid = 1;
7820
7821 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7822 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7823 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7824 if (!(dd->err_info_rcvport.status_and_code &
7825 OPA_EI_STATUS_SMASK)) {
7826 dd->err_info_rcvport.status_and_code =
7827 info & OPA_EI_CODE_SMASK;
7828 /* set status bit */
7829 dd->err_info_rcvport.status_and_code |=
7830 OPA_EI_STATUS_SMASK;
Jubin John4d114fd2016-02-14 20:21:43 -08007831 /*
7832 * save first 2 flits in the packet that caused
7833 * the error
7834 */
Bart Van Assche48a0cc132016-06-03 12:09:56 -07007835 dd->err_info_rcvport.packet_flit1 = hdr0;
7836 dd->err_info_rcvport.packet_flit2 = hdr1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007837 }
7838 switch (info) {
7839 case 1:
7840 case 2:
7841 case 3:
7842 case 4:
7843 case 5:
7844 case 6:
7845 case 7:
7846 case 9:
7847 case 11:
7848 case 12:
7849 extra = port_rcv_txt[info];
7850 break;
7851 default:
7852 reason_valid = 0;
7853 snprintf(buf, sizeof(buf), "reserved%lld", info);
7854 extra = buf;
7855 break;
7856 }
7857
7858 if (reason_valid && !do_bounce) {
7859 do_bounce = ppd->port_error_action &
7860 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7861 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7862 }
7863
7864 /* just report this */
7865 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7866 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007867 hdr0, hdr1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007868
7869 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7870 }
7871
7872 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7873 /* informative only */
7874 dd_dev_info(dd, "8051 access to LCB blocked\n");
7875 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7876 }
7877 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7878 /* informative only */
7879 dd_dev_info(dd, "host access to LCB blocked\n");
7880 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7881 }
7882
7883 /* report any remaining errors */
7884 if (reg)
7885 dd_dev_info(dd, "DCC Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007886 dcc_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007887
7888 if (lcl_reason == 0)
7889 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7890
7891 if (do_bounce) {
7892 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7893 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7894 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7895 }
7896}
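/*
 * Worked example of the PortErrorAction test above (illustrative only):
 * a PortRcv error with info code 4 (BadSLID) checks bit
 * OPA_LDR_PORTRCV_OFFSET + 4 of the FM-programmed mask, i.e.
 *
 *	do_bounce = ppd->port_error_action & (1 << (0 + 4));
 *
 * and, if set, bounces the link with lcl_reason
 * OPA_LINKDOWN_REASON_RCV_ERROR_0 + 4.  FmConfig errors use the same
 * scheme starting at bit OPA_LDR_FMCONFIG_OFFSET (16).
 */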
7897
7898static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7899{
7900 char buf[96];
7901
7902 dd_dev_info(dd, "LCB Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007903 lcb_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007904}
7905
7906/*
7907 * CCE block DC interrupt. Source is < 8.
7908 */
7909static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7910{
7911 const struct err_reg_info *eri = &dc_errs[source];
7912
7913 if (eri->handler) {
7914 interrupt_clear_down(dd, 0, eri);
7915 } else if (source == 3 /* dc_lbm_int */) {
7916 /*
7917 * This indicates that a parity error has occurred on the
7918 * address/control lines presented to the LBM. The error
7919 * is a single pulse, there is no associated error flag,
7920 * and it is non-maskable. This is because if a parity
7921 * error occurs on the request the request is dropped.
7922 * This should never occur, but it is nice to know if it
7923 * ever does.
7924 */
7925 dd_dev_err(dd, "Parity error in DC LBM block\n");
7926 } else {
7927 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7928 }
7929}
7930
7931/*
7932 * TX block send credit interrupt. Source is < 160.
7933 */
7934static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7935{
7936 sc_group_release_update(dd, source);
7937}
7938
7939/*
7940 * TX block SDMA interrupt. Source is < 48.
7941 *
7942 * SDMA interrupts are grouped by type:
7943 *
7944 * 0 - N-1 = SDma
7945 * N - 2N-1 = SDmaProgress
7946 * 2N - 3N-1 = SDmaIdle
7947 */
7948static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7949{
7950 /* what interrupt */
7951 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7952 /* which engine */
7953 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7954
7955#ifdef CONFIG_SDMA_VERBOSITY
7956 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7957 slashstrip(__FILE__), __LINE__, __func__);
7958 sdma_dumpstate(&dd->per_sdma[which]);
7959#endif
7960
7961 if (likely(what < 3 && which < dd->num_sdma)) {
7962 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7963 } else {
7964 /* should not happen */
7965 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7966 }
7967}
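/*
 * Decode example (illustrative, assuming TXE_NUM_SDMA_ENGINES is 16,
 * consistent with the "< 48" source range and the three groups listed
 * above): source 20 gives what = 20 / 16 = 1 (SDmaProgress) and
 * which = 20 % 16 = 4, so the status bit is forwarded to engine 4.
 */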
7968
7969/*
7970 * RX block receive available interrupt. Source is < 160.
7971 */
7972static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7973{
7974 struct hfi1_ctxtdata *rcd;
7975 char *err_detail;
7976
7977 if (likely(source < dd->num_rcv_contexts)) {
7978 rcd = dd->rcd[source];
7979 if (rcd) {
7980 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007981 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007982 else
7983 handle_user_interrupt(rcd);
7984 return; /* OK */
7985 }
7986 /* received an interrupt, but no rcd */
7987 err_detail = "dataless";
7988 } else {
7989 /* received an interrupt, but are not using that context */
7990 err_detail = "out of range";
7991 }
7992 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007993 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007994}
7995
7996/*
7997 * RX block receive urgent interrupt. Source is < 160.
7998 */
7999static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8000{
8001 struct hfi1_ctxtdata *rcd;
8002 char *err_detail;
8003
8004 if (likely(source < dd->num_rcv_contexts)) {
8005 rcd = dd->rcd[source];
8006 if (rcd) {
8007 /* only pay attention to user urgent interrupts */
8008 if (source >= dd->first_user_ctxt)
8009 handle_user_interrupt(rcd);
8010 return; /* OK */
8011 }
8012 /* received an interrupt, but no rcd */
8013 err_detail = "dataless";
8014 } else {
8015 /* received an interrupt, but are not using that context */
8016 err_detail = "out of range";
8017 }
8018 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008019 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008020}
8021
8022/*
8023 * Reserved range interrupt. Should not be called in normal operation.
8024 */
8025static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8026{
8027 char name[64];
8028
8029 dd_dev_err(dd, "unexpected %s interrupt\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008030 is_reserved_name(name, sizeof(name), source));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008031}
8032
8033static const struct is_table is_table[] = {
Jubin John4d114fd2016-02-14 20:21:43 -08008034/*
8035 * start end
8036 * name func interrupt func
8037 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008038{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8039 is_misc_err_name, is_misc_err_int },
8040{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8041 is_sdma_eng_err_name, is_sdma_eng_err_int },
8042{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8043 is_sendctxt_err_name, is_sendctxt_err_int },
8044{ IS_SDMA_START, IS_SDMA_END,
8045 is_sdma_eng_name, is_sdma_eng_int },
8046{ IS_VARIOUS_START, IS_VARIOUS_END,
8047 is_various_name, is_various_int },
8048{ IS_DC_START, IS_DC_END,
8049 is_dc_name, is_dc_int },
8050{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8051 is_rcv_avail_name, is_rcv_avail_int },
8052{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8053 is_rcv_urgent_name, is_rcv_urgent_int },
8054{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8055 is_send_credit_name, is_send_credit_int},
8056{ IS_RESERVED_START, IS_RESERVED_END,
8057 is_reserved_name, is_reserved_int},
8058};
8059
8060/*
8061 * Interrupt source interrupt - called when the given source has an interrupt.
8062 * Source is a bit index into an array of 64-bit integers.
8063 */
8064static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8065{
8066 const struct is_table *entry;
8067
8068 /* avoids a double compare by walking the table in-order */
8069 for (entry = &is_table[0]; entry->is_name; entry++) {
8070 if (source < entry->end) {
8071 trace_hfi1_interrupt(dd, entry, source);
8072 entry->is_int(dd, source - entry->start);
8073 return;
8074 }
8075 }
8076 /* fell off the end */
8077 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8078}
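/*
 * Dispatch note (descriptive): is_table[] is ordered by range, so the
 * first entry whose ->end exceeds the source owns it.  A source in
 * [IS_SDMA_START, IS_SDMA_END), for example, is handed to
 * is_sdma_eng_int() as (source - IS_SDMA_START), i.e. as a range-
 * relative index rather than the global bit position.
 */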
8079
8080/*
8081 * General interrupt handler. This is able to correctly handle
8082 * all interrupts in case INTx is used.
8083 */
8084static irqreturn_t general_interrupt(int irq, void *data)
8085{
8086 struct hfi1_devdata *dd = data;
8087 u64 regs[CCE_NUM_INT_CSRS];
8088 u32 bit;
8089 int i;
8090
8091 this_cpu_inc(*dd->int_counter);
8092
8093 /* phase 1: scan and clear all handled interrupts */
8094 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8095 if (dd->gi_mask[i] == 0) {
8096 regs[i] = 0; /* used later */
8097 continue;
8098 }
8099 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8100 dd->gi_mask[i];
8101 /* only clear if anything is set */
8102 if (regs[i])
8103 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8104 }
8105
8106 /* phase 2: call the appropriate handler */
8107 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John17fb4f22016-02-14 20:21:52 -08008108 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008109 is_interrupt(dd, bit);
8110 }
8111
8112 return IRQ_HANDLED;
8113}
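/*
 * Note on the two-phase scheme above (descriptive): status bits are read
 * and cleared before any handler runs, so an interrupt arriving during
 * handling re-asserts rather than being lost.  A given source bit b was
 * read from CCE_INT_STATUS + 8 * (b / 64) and is passed to
 * is_interrupt() as the flat bit index b.
 */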
8114
8115static irqreturn_t sdma_interrupt(int irq, void *data)
8116{
8117 struct sdma_engine *sde = data;
8118 struct hfi1_devdata *dd = sde->dd;
8119 u64 status;
8120
8121#ifdef CONFIG_SDMA_VERBOSITY
8122 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8123 slashstrip(__FILE__), __LINE__, __func__);
8124 sdma_dumpstate(sde);
8125#endif
8126
8127 this_cpu_inc(*dd->int_counter);
8128
8129 /* This read_csr is really bad in the hot path */
8130 status = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008131 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8132 & sde->imask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008133 if (likely(status)) {
8134 /* clear the interrupt(s) */
8135 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008136 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8137 status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008138
8139 /* handle the interrupt(s) */
8140 sdma_engine_interrupt(sde, status);
8141 } else
8142 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008143 sde->this_idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008144
8145 return IRQ_HANDLED;
8146}
8147
8148/*
Dean Luickecd42f82016-02-03 14:35:14 -08008149 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8150 * to ensure that the write completed. This does NOT guarantee that
8151 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008152 */
8153static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8154{
8155 struct hfi1_devdata *dd = rcd->dd;
8156 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8157
8158 mmiowb(); /* make sure everything before is written */
8159 write_csr(dd, addr, rcd->imask);
8160 /* force the above write on the chip and get a value back */
8161 (void)read_csr(dd, addr);
8162}
8163
8164/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008165void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008166{
8167 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8168}
8169
Dean Luickecd42f82016-02-03 14:35:14 -08008170/*
8171 * Return non-zero if a packet is present.
8172 *
8173 * This routine is called when rechecking for packets after the RcvAvail
8174 * interrupt has been cleared down. First, do a quick check of memory for
8175 * a packet present. If not found, use an expensive CSR read of the context
8176 * tail to determine the actual tail. The CSR read is necessary because there
8177 * is no method to push pending DMAs to memory other than an interrupt and we
8178 * are trying to determine if we need to force an interrupt.
8179 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008180static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8181{
Dean Luickecd42f82016-02-03 14:35:14 -08008182 u32 tail;
8183 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008184
Dean Luickecd42f82016-02-03 14:35:14 -08008185 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8186 present = (rcd->seq_cnt ==
8187 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8188 else /* is RDMA rtail */
8189 present = (rcd->head != get_rcvhdrtail(rcd));
8190
8191 if (present)
8192 return 1;
8193
8194	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8195 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8196 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008197}
8198
8199/*
8200 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8201 * This routine will try to handle packets immediately (latency), but if
8202 * it finds too many, it will invoke the thread handler (bandwidth). The
Jubin John16733b82016-02-14 20:20:58 -08008203 * chip receive interrupt is *not* cleared down until this or the thread (if
Dean Luickf4f30031c2015-10-26 10:28:44 -04008204 * invoked) is finished. The intent is to avoid extra interrupts while we
8205 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008206 */
8207static irqreturn_t receive_context_interrupt(int irq, void *data)
8208{
8209 struct hfi1_ctxtdata *rcd = data;
8210 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008211 int disposition;
8212 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008213
8214 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8215 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008216 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008217
Dean Luickf4f30031c2015-10-26 10:28:44 -04008218 /* receive interrupt remains blocked while processing packets */
8219 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008220
Dean Luickf4f30031c2015-10-26 10:28:44 -04008221 /*
8222 * Too many packets were seen while processing packets in this
8223 * IRQ handler. Invoke the handler thread. The receive interrupt
8224 * remains blocked.
8225 */
8226 if (disposition == RCV_PKT_LIMIT)
8227 return IRQ_WAKE_THREAD;
8228
8229 /*
8230 * The packet processor detected no more packets. Clear the receive
8231	 * interrupt and recheck for a packet that may have arrived
8232 * after the previous check and interrupt clear. If a packet arrived,
8233 * force another interrupt.
8234 */
8235 clear_recv_intr(rcd);
8236 present = check_packet_present(rcd);
8237 if (present)
8238 force_recv_intr(rcd);
8239
8240 return IRQ_HANDLED;
8241}
8242
8243/*
8244 * Receive packet thread handler. This expects to be invoked with the
8245 * receive interrupt still blocked.
8246 */
8247static irqreturn_t receive_context_thread(int irq, void *data)
8248{
8249 struct hfi1_ctxtdata *rcd = data;
8250 int present;
8251
8252 /* receive interrupt is still blocked from the IRQ handler */
8253 (void)rcd->do_interrupt(rcd, 1);
8254
8255 /*
8256 * The packet processor will only return if it detected no more
8257 * packets. Hold IRQs here so we can safely clear the interrupt and
8258 * recheck for a packet that may have arrived after the previous
8259 * check and the interrupt clear. If a packet arrived, force another
8260 * interrupt.
8261 */
8262 local_irq_disable();
8263 clear_recv_intr(rcd);
8264 present = check_packet_present(rcd);
8265 if (present)
8266 force_recv_intr(rcd);
8267 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008268
8269 return IRQ_HANDLED;
8270}
8271
8272/* ========================================================================= */
8273
8274u32 read_physical_state(struct hfi1_devdata *dd)
8275{
8276 u64 reg;
8277
8278 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8279 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8280 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8281}
8282
Jim Snowfb9036d2016-01-11 18:32:21 -05008283u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008284{
8285 u64 reg;
8286
8287 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8288 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8289 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8290}
8291
8292static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8293{
8294 u64 reg;
8295
8296 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8297 /* clear current state, set new state */
8298 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8299 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8300 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8301}
8302
8303/*
8304 * Use the 8051 to read a LCB CSR.
8305 */
8306static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8307{
8308 u32 regno;
8309 int ret;
8310
8311 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8312 if (acquire_lcb_access(dd, 0) == 0) {
8313 *data = read_csr(dd, addr);
8314 release_lcb_access(dd, 0);
8315 return 0;
8316 }
8317 return -EBUSY;
8318 }
8319
8320 /* register is an index of LCB registers: (offset - base) / 8 */
8321 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8322 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8323 if (ret != HCMD_SUCCESS)
8324 return -EBUSY;
8325 return 0;
8326}
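/*
 * Example of the regno encoding above (illustrative): LCB CSRs are
 * 8 bytes apart, so an address of DC_LCB_CFG_RUN + 0x18 becomes register
 * index (0x18 >> 3) = 3 in the HCMD_READ_LCB_CSR command.  The same
 * encoding is used by write_lcb_via_8051() below.
 */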
8327
8328/*
8329 * Read an LCB CSR. Access may not be in host control, so check.
8330 * Return 0 on success, -EBUSY on failure.
8331 */
8332int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8333{
8334 struct hfi1_pportdata *ppd = dd->pport;
8335
8336 /* if up, go through the 8051 for the value */
8337 if (ppd->host_link_state & HLS_UP)
8338 return read_lcb_via_8051(dd, addr, data);
8339 /* if going up or down, no access */
8340 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8341 return -EBUSY;
8342 /* otherwise, host has access */
8343 *data = read_csr(dd, addr);
8344 return 0;
8345}
8346
8347/*
8348 * Use the 8051 to write a LCB CSR.
8349 */
8350static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8351{
Dean Luick3bf40d62015-11-06 20:07:04 -05008352 u32 regno;
8353 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008354
Dean Luick3bf40d62015-11-06 20:07:04 -05008355 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8356 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8357 if (acquire_lcb_access(dd, 0) == 0) {
8358 write_csr(dd, addr, data);
8359 release_lcb_access(dd, 0);
8360 return 0;
8361 }
8362 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008363 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008364
8365 /* register is an index of LCB registers: (offset - base) / 8 */
8366 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8367 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8368 if (ret != HCMD_SUCCESS)
8369 return -EBUSY;
8370 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008371}
8372
8373/*
8374 * Write an LCB CSR. Access may not be in host control, so check.
8375 * Return 0 on success, -EBUSY on failure.
8376 */
8377int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8378{
8379 struct hfi1_pportdata *ppd = dd->pport;
8380
8381 /* if up, go through the 8051 for the value */
8382 if (ppd->host_link_state & HLS_UP)
8383 return write_lcb_via_8051(dd, addr, data);
8384 /* if going up or down, no access */
8385 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8386 return -EBUSY;
8387 /* otherwise, host has access */
8388 write_csr(dd, addr, data);
8389 return 0;
8390}
8391
8392/*
8393 * Returns:
8394 * < 0 = Linux error, not able to get access
8395 * > 0 = 8051 command RETURN_CODE
8396 */
8397static int do_8051_command(
8398 struct hfi1_devdata *dd,
8399 u32 type,
8400 u64 in_data,
8401 u64 *out_data)
8402{
8403 u64 reg, completed;
8404 int return_code;
8405 unsigned long flags;
8406 unsigned long timeout;
8407
8408 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8409
8410 /*
8411 * Alternative to holding the lock for a long time:
8412	 * - keep a busy wait and have other users bounce off
8413 */
8414 spin_lock_irqsave(&dd->dc8051_lock, flags);
8415
8416 /* We can't send any commands to the 8051 if it's in reset */
8417 if (dd->dc_shutdown) {
8418 return_code = -ENODEV;
8419 goto fail;
8420 }
8421
8422 /*
8423 * If an 8051 host command timed out previously, then the 8051 is
8424 * stuck.
8425 *
8426 * On first timeout, attempt to reset and restart the entire DC
8427 * block (including 8051). (Is this too big of a hammer?)
8428 *
8429 * If the 8051 times out a second time, the reset did not bring it
8430 * back to healthy life. In that case, fail any subsequent commands.
8431 */
8432 if (dd->dc8051_timed_out) {
8433 if (dd->dc8051_timed_out > 1) {
8434 dd_dev_err(dd,
8435 "Previous 8051 host command timed out, skipping command %u\n",
8436 type);
8437 return_code = -ENXIO;
8438 goto fail;
8439 }
8440 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8441 dc_shutdown(dd);
8442 dc_start(dd);
8443 spin_lock_irqsave(&dd->dc8051_lock, flags);
8444 }
8445
8446 /*
8447 * If there is no timeout, then the 8051 command interface is
8448 * waiting for a command.
8449 */
8450
8451 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008452	 * When writing an LCB CSR, out_data contains the full value
8453	 * to be written, while in_data contains the relative LCB
8454	 * address in 7:0. Do the work here, rather than in the
8455	 * caller, of distributing the write data to where it needs to go:
8456 *
8457 * Write data
8458 * 39:00 -> in_data[47:8]
8459 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8460 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8461 */
8462 if (type == HCMD_WRITE_LCB_CSR) {
8463 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8464 reg = ((((*out_data) >> 40) & 0xff) <<
8465 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8466 | ((((*out_data) >> 48) & 0xffff) <<
8467 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8468 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8469 }
8470
8471 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008472 * Do two writes: the first to stabilize the type and req_data, the
8473 * second to activate.
8474 */
8475 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8476 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8477 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8478 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8479 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8480 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8481 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8482
8483 /* wait for completion, alternate: interrupt */
8484 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8485 while (1) {
8486 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8487 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8488 if (completed)
8489 break;
8490 if (time_after(jiffies, timeout)) {
8491 dd->dc8051_timed_out++;
8492 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8493 if (out_data)
8494 *out_data = 0;
8495 return_code = -ETIMEDOUT;
8496 goto fail;
8497 }
8498 udelay(2);
8499 }
8500
8501 if (out_data) {
8502 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8503 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8504 if (type == HCMD_READ_LCB_CSR) {
8505 /* top 16 bits are in a different register */
8506 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8507 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8508 << (48
8509 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8510 }
8511 }
8512 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8513 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8514 dd->dc8051_timed_out = 0;
8515 /*
8516 * Clear command for next user.
8517 */
8518 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8519
8520fail:
8521 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8522
8523 return return_code;
8524}
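/*
 * Summary of the handshake implemented above (descriptive): the host
 * writes REQ_TYPE/REQ_DATA to DC_DC8051_CFG_HOST_CMD_0, writes it again
 * with REQ_NEW set to kick the firmware, then polls
 * DC_DC8051_CFG_HOST_CMD_1.COMPLETED for up to DC8051_COMMAND_TIMEOUT
 * milliseconds.  RSP_DATA and RETURN_CODE come back in HOST_CMD_1 (plus
 * DC_DC8051_CFG_EXT_DEV_1 for the top 16 bits of an LCB CSR read), and
 * HOST_CMD_0 is cleared so the next caller starts from a known state.
 */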
8525
8526static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8527{
8528 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8529}
8530
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008531int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8532 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008533{
8534 u64 data;
8535 int ret;
8536
8537 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8538 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8539 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8540 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8541 if (ret != HCMD_SUCCESS) {
8542 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008543 "load 8051 config: field id %d, lane %d, err %d\n",
8544 (int)field_id, (int)lane_id, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008545 }
8546 return ret;
8547}
8548
8549/*
8550 * Read the 8051 firmware "registers". Use the RAM directly. Always
8551 * set the result, even on error.
8552 * Return 0 on success, -errno on failure
8553 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008554int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8555 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008556{
8557 u64 big_data;
8558 u32 addr;
8559 int ret;
8560
8561 /* address start depends on the lane_id */
8562 if (lane_id < 4)
8563 addr = (4 * NUM_GENERAL_FIELDS)
8564 + (lane_id * 4 * NUM_LANE_FIELDS);
8565 else
8566 addr = 0;
8567 addr += field_id * 4;
8568
8569 /* read is in 8-byte chunks, hardware will truncate the address down */
8570 ret = read_8051_data(dd, addr, 8, &big_data);
8571
8572 if (ret == 0) {
8573 /* extract the 4 bytes we want */
8574 if (addr & 0x4)
8575 *result = (u32)(big_data >> 32);
8576 else
8577 *result = (u32)big_data;
8578 } else {
8579 *result = 0;
8580 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008581 __func__, lane_id, field_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008582 }
8583
8584 return ret;
8585}
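/*
 * Addressing example for the layout used above (illustrative): general
 * fields occupy the first 4 * NUM_GENERAL_FIELDS bytes, each lane then
 * takes 4 * NUM_LANE_FIELDS bytes, and every field is 4 bytes wide.
 * Since read_8051_data() returns an aligned 8-byte chunk, a field whose
 * byte address has bit 2 set (addr & 0x4) comes from the upper 32 bits
 * of big_data, otherwise from the lower 32 bits.
 */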
8586
8587static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8588 u8 continuous)
8589{
8590 u32 frame;
8591
8592 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8593 | power_management << POWER_MANAGEMENT_SHIFT;
8594 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8595 GENERAL_CONFIG, frame);
8596}
8597
8598static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8599 u16 vl15buf, u8 crc_sizes)
8600{
8601 u32 frame;
8602
8603 frame = (u32)vau << VAU_SHIFT
8604 | (u32)z << Z_SHIFT
8605 | (u32)vcu << VCU_SHIFT
8606 | (u32)vl15buf << VL15BUF_SHIFT
8607 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8608 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8609 GENERAL_CONFIG, frame);
8610}
8611
8612static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8613 u8 *flag_bits, u16 *link_widths)
8614{
8615 u32 frame;
8616
8617 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008618 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008619 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8620 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8621 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8622}
8623
8624static int write_vc_local_link_width(struct hfi1_devdata *dd,
8625 u8 misc_bits,
8626 u8 flag_bits,
8627 u16 link_widths)
8628{
8629 u32 frame;
8630
8631 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8632 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8633 | (u32)link_widths << LINK_WIDTH_SHIFT;
8634 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8635 frame);
8636}
8637
8638static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8639 u8 device_rev)
8640{
8641 u32 frame;
8642
8643 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8644 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8645 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8646}
8647
8648static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8649 u8 *device_rev)
8650{
8651 u32 frame;
8652
8653 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8654 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8655 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8656 & REMOTE_DEVICE_REV_MASK;
8657}
8658
8659void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8660{
8661 u32 frame;
8662
8663 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8664 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8665 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8666}
8667
8668static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8669 u8 *continuous)
8670{
8671 u32 frame;
8672
8673 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8674 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8675 & POWER_MANAGEMENT_MASK;
8676 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8677 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8678}
8679
8680static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8681 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8682{
8683 u32 frame;
8684
8685 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8686 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8687 *z = (frame >> Z_SHIFT) & Z_MASK;
8688 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8689 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8690 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8691}
8692
8693static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8694 u8 *remote_tx_rate,
8695 u16 *link_widths)
8696{
8697 u32 frame;
8698
8699 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008700 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008701 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8702 & REMOTE_TX_RATE_MASK;
8703 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8704}
8705
8706static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8707{
8708 u32 frame;
8709
8710 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8711 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8712}
8713
8714static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8715{
8716 u32 frame;
8717
8718 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8719 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8720}
8721
8722static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8723{
8724 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8725}
8726
8727static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8728{
8729 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8730}
8731
8732void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8733{
8734 u32 frame;
8735 int ret;
8736
8737 *link_quality = 0;
8738 if (dd->pport->host_link_state & HLS_UP) {
8739 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008740 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008741 if (ret == 0)
8742 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8743 & LINK_QUALITY_MASK;
8744 }
8745}
8746
8747static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8748{
8749 u32 frame;
8750
8751 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8752 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8753}
8754
Dean Luickfeb831d2016-04-14 08:31:36 -07008755static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8756{
8757 u32 frame;
8758
8759 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8760 *ldr = (frame & 0xff);
8761}
8762
Mike Marciniszyn77241052015-07-30 15:17:43 -04008763static int read_tx_settings(struct hfi1_devdata *dd,
8764 u8 *enable_lane_tx,
8765 u8 *tx_polarity_inversion,
8766 u8 *rx_polarity_inversion,
8767 u8 *max_rate)
8768{
8769 u32 frame;
8770 int ret;
8771
8772 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8773 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8774 & ENABLE_LANE_TX_MASK;
8775 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8776 & TX_POLARITY_INVERSION_MASK;
8777 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8778 & RX_POLARITY_INVERSION_MASK;
8779 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8780 return ret;
8781}
8782
8783static int write_tx_settings(struct hfi1_devdata *dd,
8784 u8 enable_lane_tx,
8785 u8 tx_polarity_inversion,
8786 u8 rx_polarity_inversion,
8787 u8 max_rate)
8788{
8789 u32 frame;
8790
8791 /* no need to mask, all variable sizes match field widths */
8792 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8793 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8794 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8795 | max_rate << MAX_RATE_SHIFT;
8796 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8797}
8798
8799static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8800{
8801 u32 frame, version, prod_id;
8802 int ret, lane;
8803
8804 /* 4 lanes */
8805 for (lane = 0; lane < 4; lane++) {
8806 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8807 if (ret) {
Jubin John17fb4f22016-02-14 20:21:52 -08008808 dd_dev_err(dd,
8809 "Unable to read lane %d firmware details\n",
8810 lane);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008811 continue;
8812 }
8813 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8814 & SPICO_ROM_VERSION_MASK;
8815 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8816 & SPICO_ROM_PROD_ID_MASK;
8817 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008818 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8819 lane, version, prod_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008820 }
8821}
8822
8823/*
8824 * Read an idle LCB message.
8825 *
8826 * Returns 0 on success, -EINVAL on error
8827 */
8828static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8829{
8830 int ret;
8831
Jubin John17fb4f22016-02-14 20:21:52 -08008832 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008833 if (ret != HCMD_SUCCESS) {
8834 dd_dev_err(dd, "read idle message: type %d, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008835 (u32)type, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008836 return -EINVAL;
8837 }
8838 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8839 /* return only the payload as we already know the type */
8840 *data_out >>= IDLE_PAYLOAD_SHIFT;
8841 return 0;
8842}
8843
8844/*
8845 * Read an idle SMA message. To be done in response to a notification from
8846 * the 8051.
8847 *
8848 * Returns 0 on success, -EINVAL on error
8849 */
8850static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8851{
Jubin John17fb4f22016-02-14 20:21:52 -08008852 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8853 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008854}
8855
8856/*
8857 * Send an idle LCB message.
8858 *
8859 * Returns 0 on success, -EINVAL on error
8860 */
8861static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8862{
8863 int ret;
8864
8865 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8866 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8867 if (ret != HCMD_SUCCESS) {
8868 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008869 data, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008870 return -EINVAL;
8871 }
8872 return 0;
8873}
8874
8875/*
8876 * Send an idle SMA message.
8877 *
8878 * Returns 0 on success, -EINVAL on error
8879 */
8880int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8881{
8882 u64 data;
8883
Jubin John17fb4f22016-02-14 20:21:52 -08008884 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8885 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008886 return send_idle_message(dd, data);
8887}
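/*
 * Idle message framing (descriptive): an SMA idle message is built as
 *
 *	data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
 *	       ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
 *
 * with the payload sitting above the type field, so the receive path in
 * read_idle_message() strips the type by shifting right by
 * IDLE_PAYLOAD_SHIFT and callers only ever see the payload.
 */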
8888
8889/*
8890 * Initialize the LCB then do a quick link up. This may or may not be
8891 * in loopback.
8892 *
8893 * return 0 on success, -errno on error
8894 */
8895static int do_quick_linkup(struct hfi1_devdata *dd)
8896{
8897 u64 reg;
8898 unsigned long timeout;
8899 int ret;
8900
8901 lcb_shutdown(dd, 0);
8902
8903 if (loopback) {
8904 /* LCB_CFG_LOOPBACK.VAL = 2 */
8905 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8906 write_csr(dd, DC_LCB_CFG_LOOPBACK,
Jubin John17fb4f22016-02-14 20:21:52 -08008907 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008908 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8909 }
8910
8911 /* start the LCBs */
8912 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8913 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8914
8915 /* simulator only loopback steps */
8916 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8917 /* LCB_CFG_RUN.EN = 1 */
8918 write_csr(dd, DC_LCB_CFG_RUN,
Jubin John17fb4f22016-02-14 20:21:52 -08008919 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008920
8921 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8922 timeout = jiffies + msecs_to_jiffies(10);
8923 while (1) {
Jubin John17fb4f22016-02-14 20:21:52 -08008924 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008925 if (reg)
8926 break;
8927 if (time_after(jiffies, timeout)) {
8928 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008929 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008930 return -ETIMEDOUT;
8931 }
8932 udelay(2);
8933 }
8934
8935 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
Jubin John17fb4f22016-02-14 20:21:52 -08008936 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008937 }
8938
8939 if (!loopback) {
8940 /*
8941 * When doing quick linkup and not in loopback, both
8942 * sides must be done with LCB set-up before either
8943 * starts the quick linkup. Put a delay here so that
8944 * both sides can be started and have a chance to be
8945 * done with LCB set up before resuming.
8946 */
8947 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008948 "Pausing for peer to be finished with LCB set up\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008949 msleep(5000);
Jubin John17fb4f22016-02-14 20:21:52 -08008950 dd_dev_err(dd, "Continuing with quick linkup\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008951 }
8952
8953 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8954 set_8051_lcb_access(dd);
8955
8956 /*
8957 * State "quick" LinkUp request sets the physical link state to
8958 * LinkUp without a verify capability sequence.
8959 * This state is in simulator v37 and later.
8960 */
8961 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8962 if (ret != HCMD_SUCCESS) {
8963 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008964 "%s: set physical link state to quick LinkUp failed with return %d\n",
8965 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008966
8967 set_host_lcb_access(dd);
8968 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8969
8970 if (ret >= 0)
8971 ret = -EINVAL;
8972 return ret;
8973 }
8974
8975 return 0; /* success */
8976}
8977
8978/*
8979 * Set the SerDes to internal loopback mode.
8980 * Returns 0 on success, -errno on error.
8981 */
8982static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8983{
8984 int ret;
8985
8986 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8987 if (ret == HCMD_SUCCESS)
8988 return 0;
8989 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008990 "Set physical link state to SerDes Loopback failed with return %d\n",
8991 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008992 if (ret >= 0)
8993 ret = -EINVAL;
8994 return ret;
8995}
8996
8997/*
8998 * Do all special steps to set up loopback.
8999 */
9000static int init_loopback(struct hfi1_devdata *dd)
9001{
9002 dd_dev_info(dd, "Entering loopback mode\n");
9003
9004 /* all loopbacks should disable self GUID check */
9005 write_csr(dd, DC_DC8051_CFG_MODE,
Jubin John17fb4f22016-02-14 20:21:52 -08009006 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009007
9008 /*
9009 * The simulator has only one loopback option - LCB. Switch
9010 * to that option, which includes quick link up.
9011 *
9012 * Accept all valid loopback values.
9013 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08009014 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9015 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9016 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009017 loopback = LOOPBACK_LCB;
9018 quick_linkup = 1;
9019 return 0;
9020 }
9021
9022 /* handle serdes loopback */
9023 if (loopback == LOOPBACK_SERDES) {
9024		/* internal serdes loopback needs quick linkup on RTL */
9025 if (dd->icode == ICODE_RTL_SILICON)
9026 quick_linkup = 1;
9027 return set_serdes_loopback_mode(dd);
9028 }
9029
9030 /* LCB loopback - handled at poll time */
9031 if (loopback == LOOPBACK_LCB) {
9032 quick_linkup = 1; /* LCB is always quick linkup */
9033
9034 /* not supported in emulation due to emulation RTL changes */
9035 if (dd->icode == ICODE_FPGA_EMULATION) {
9036 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009037 "LCB loopback not supported in emulation\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009038 return -EINVAL;
9039 }
9040 return 0;
9041 }
9042
9043 /* external cable loopback requires no extra steps */
9044 if (loopback == LOOPBACK_CABLE)
9045 return 0;
9046
9047 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9048 return -EINVAL;
9049}
9050
9051/*
9052 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9053 * used in the Verify Capability link width attribute.
9054 */
9055static u16 opa_to_vc_link_widths(u16 opa_widths)
9056{
9057 int i;
9058 u16 result = 0;
9059
9060 static const struct link_bits {
9061 u16 from;
9062 u16 to;
9063 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08009064 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9065 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9066 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9067 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04009068 };
9069
9070 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9071 if (opa_widths & opa_link_xlate[i].from)
9072 result |= opa_link_xlate[i].to;
9073 }
9074 return result;
9075}
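/*
 * Translation example (illustrative): an enabled-width mask of
 * OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X maps to VC bits
 * (1 << 0) | (1 << 3) = 0x9, which is what set_local_link_attributes()
 * below hands to write_vc_local_link_width().
 */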
9076
9077/*
9078 * Set link attributes before moving to polling.
9079 */
9080static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9081{
9082 struct hfi1_devdata *dd = ppd->dd;
9083 u8 enable_lane_tx;
9084 u8 tx_polarity_inversion;
9085 u8 rx_polarity_inversion;
9086 int ret;
9087
9088 /* reset our fabric serdes to clear any lingering problems */
9089 fabric_serdes_reset(dd);
9090
9091 /* set the local tx rate - need to read-modify-write */
9092 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009093 &rx_polarity_inversion, &ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009094 if (ret)
9095 goto set_local_link_attributes_fail;
9096
9097 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9098 /* set the tx rate to the fastest enabled */
9099 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9100 ppd->local_tx_rate = 1;
9101 else
9102 ppd->local_tx_rate = 0;
9103 } else {
9104 /* set the tx rate to all enabled */
9105 ppd->local_tx_rate = 0;
9106 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9107 ppd->local_tx_rate |= 2;
9108 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9109 ppd->local_tx_rate |= 1;
9110 }
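	/*
	 * Note (descriptive): pre-0.20 DC8051 firmware takes local_tx_rate
	 * as a single selector (1 = 25G, 0 = the slower rate), while 0.20
	 * and later firmware takes a bitmask of enabled rates (bit 1 = 25G,
	 * bit 0 = 12.5G), hence the two branches above.
	 */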
Easwar Hariharanfebffe22015-10-26 10:28:36 -04009111
9112 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009113 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009114 rx_polarity_inversion, ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009115 if (ret != HCMD_SUCCESS)
9116 goto set_local_link_attributes_fail;
9117
9118 /*
9119 * DC supports continuous updates.
9120 */
Jubin John17fb4f22016-02-14 20:21:52 -08009121 ret = write_vc_local_phy(dd,
9122 0 /* no power management */,
9123 1 /* continuous updates */);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009124 if (ret != HCMD_SUCCESS)
9125 goto set_local_link_attributes_fail;
9126
9127 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9128 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9129 ppd->port_crc_mode_enabled);
9130 if (ret != HCMD_SUCCESS)
9131 goto set_local_link_attributes_fail;
9132
9133 ret = write_vc_local_link_width(dd, 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009134 opa_to_vc_link_widths(
9135 ppd->link_width_enabled));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009136 if (ret != HCMD_SUCCESS)
9137 goto set_local_link_attributes_fail;
9138
9139 /* let peer know who we are */
9140 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9141 if (ret == HCMD_SUCCESS)
9142 return 0;
9143
9144set_local_link_attributes_fail:
9145 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009146 "Failed to set local link attributes, return 0x%x\n",
9147 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009148 return ret;
9149}
9150
9151/*
Easwar Hariharan623bba22016-04-12 11:25:57 -07009152 * Call this to start the link.
9153 * Do not do anything if the link is disabled.
9154 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009155 */
9156int start_link(struct hfi1_pportdata *ppd)
9157{
9158 if (!ppd->link_enabled) {
9159 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009160 "%s: stopping link start because link is disabled\n",
9161 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009162 return 0;
9163 }
9164 if (!ppd->driver_link_ready) {
9165 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009166 "%s: stopping link start because driver is not ready\n",
9167 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009168 return 0;
9169 }
9170
Easwar Hariharan623bba22016-04-12 11:25:57 -07009171 return set_link_state(ppd, HLS_DN_POLL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009172}
9173
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009174static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9175{
9176 struct hfi1_devdata *dd = ppd->dd;
9177 u64 mask;
9178 unsigned long timeout;
9179
9180 /*
9181 * Check for QSFP interrupt for t_init (SFF 8679)
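	 * INT_N is active low: poll the IN CSR for up to 2 seconds and, once
	 * the bit reads clear (interrupt asserted), acknowledge it through
	 * the CLEAR CSR.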
9182 */
9183 timeout = jiffies + msecs_to_jiffies(2000);
9184 while (1) {
9185 mask = read_csr(dd, dd->hfi1_id ?
9186 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9187 if (!(mask & QSFP_HFI0_INT_N)) {
9188 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9189 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9190 break;
9191 }
9192 if (time_after(jiffies, timeout)) {
9193 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9194 __func__);
9195 break;
9196 }
9197 udelay(2);
9198 }
9199}
9200
9201static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9202{
9203 struct hfi1_devdata *dd = ppd->dd;
9204 u64 mask;
9205
9206 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9207 if (enable)
9208 mask |= (u64)QSFP_HFI0_INT_N;
9209 else
9210 mask &= ~(u64)QSFP_HFI0_INT_N;
9211 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9212}
9213
9214void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009215{
9216 struct hfi1_devdata *dd = ppd->dd;
9217 u64 mask, qsfp_mask;
9218
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009219 /* Disable INT_N from triggering QSFP interrupts */
9220 set_qsfp_int_n(ppd, 0);
9221
9222 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009223 mask = (u64)QSFP_HFI0_RESET_N;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009224
9225 qsfp_mask = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009226 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009227 qsfp_mask &= ~mask;
9228 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009229 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009230
9231 udelay(10);
9232
9233 qsfp_mask |= mask;
9234 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009235 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009236
9237 wait_for_qsfp_init(ppd);
9238
9239 /*
9240 * Allow INT_N to trigger the QSFP interrupt to watch
9241 * for alarms and warnings
9242 */
9243 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009244}
9245
9246static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9247 u8 *qsfp_interrupt_status)
9248{
9249 struct hfi1_devdata *dd = ppd->dd;
9250
9251 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009252 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9253		dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
9254 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009255
9256 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009257 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9258 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9259 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009260
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009261 /*
9262 * The remaining alarms/warnings don't matter if the link is down.
9263 */
9264 if (ppd->host_link_state & HLS_DOWN)
9265 return 0;
9266
Mike Marciniszyn77241052015-07-30 15:17:43 -04009267 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009268 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9269 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9270 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009271
9272 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009273 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9274 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9275 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009276
9277 /* Byte 2 is vendor specific */
9278
9279 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009280 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9281 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9282 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009283
9284 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009285 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9286 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9287 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009288
9289 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009290 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9291 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9292 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009293
9294 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009295 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9296 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9297 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009298
9299 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009300 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9301 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9302 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009303
9304 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009305 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9306 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9307 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009308
9309 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009310 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9311 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9312 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009313
9314 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009315 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9316 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9317 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009318
9319 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009320 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9321 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9322 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009323
9324 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009325 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9326 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9327 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009328
9329 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009330 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9331 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9332 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009333
9334 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009335 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9336 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9337 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009338
9339 /* Bytes 9-10 and 11-12 are reserved */
9340 /* Bytes 13-15 are vendor specific */
9341
9342 return 0;
9343}
9344
Easwar Hariharan623bba22016-04-12 11:25:57 -07009345/* This routine will only be scheduled if the QSFP module present signal is asserted */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009346void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009347{
9348 struct qsfp_data *qd;
9349 struct hfi1_pportdata *ppd;
9350 struct hfi1_devdata *dd;
9351
9352 qd = container_of(work, struct qsfp_data, qsfp_work);
9353 ppd = qd->ppd;
9354 dd = ppd->dd;
9355
9356 /* Sanity check */
9357 if (!qsfp_mod_present(ppd))
9358 return;
9359
9360 /*
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009361 * Turn DC back on after cable has been re-inserted. Up until
9362 * now, the DC has been in reset to save power.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009363 */
9364 dc_start(dd);
9365
9366 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009367 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009368
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009369 wait_for_qsfp_init(ppd);
9370
9371 /*
9372 * Allow INT_N to trigger the QSFP interrupt to watch
9373 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009374 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009375 set_qsfp_int_n(ppd, 1);
9376
9377 tune_serdes(ppd);
9378
9379 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009380 }
9381
9382 if (qd->check_interrupt_flags) {
9383 u8 qsfp_interrupt_status[16] = {0,};
9384
Dean Luick765a6fa2016-03-05 08:50:06 -08009385 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9386 &qsfp_interrupt_status[0], 16) != 16) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009387 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009388 "%s: Failed to read status of QSFP module\n",
9389 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009390 } else {
9391 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009392
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009393 handle_qsfp_error_conditions(
9394 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009395 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9396 ppd->qsfp_info.check_interrupt_flags = 0;
9397 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08009398 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009399 }
9400 }
9401}
9402
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009403static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009404{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009405 struct hfi1_pportdata *ppd = dd->pport;
9406 u64 qsfp_mask, cce_int_mask;
9407 const int qsfp1_int_smask = QSFP1_INT % 64;
9408 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009409
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009410 /*
9411 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9412 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9413 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9414 * the index of the appropriate CSR in the CCEIntMask CSR array
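	 * (each CCEIntMask CSR covers 64 interrupt sources and consecutive
	 * CSRs are 8 bytes apart, hence the (QSFP1_INT / 64) * 8 offset below)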
9415 */
9416 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9417 (8 * (QSFP1_INT / 64)));
9418 if (dd->hfi1_id) {
9419 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9420 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9421 cce_int_mask);
9422 } else {
9423 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9424 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9425 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009426 }
9427
Mike Marciniszyn77241052015-07-30 15:17:43 -04009428 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9429 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009430 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9431 qsfp_mask);
9432 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9433 qsfp_mask);
9434
9435 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009436
9437 /* Handle active low nature of INT_N and MODPRST_N pins */
9438 if (qsfp_mod_present(ppd))
9439 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9440 write_csr(dd,
9441 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9442 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009443}
9444
Dean Luickbbdeb332015-12-01 15:38:15 -05009445/*
9446 * Do a one-time initialize of the LCB block.
9447 */
9448static void init_lcb(struct hfi1_devdata *dd)
9449{
Dean Luicka59329d2016-02-03 14:32:31 -08009450 /* simulator does not correctly handle LCB cclk loopback, skip */
9451 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9452 return;
9453
Dean Luickbbdeb332015-12-01 15:38:15 -05009454 /* the DC has been reset earlier in the driver load */
9455
9456 /* set LCB for cclk loopback on the port */
9457 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9458 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9459 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9460 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9461 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9462 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9463 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9464}
9465
Mike Marciniszyn77241052015-07-30 15:17:43 -04009466int bringup_serdes(struct hfi1_pportdata *ppd)
9467{
9468 struct hfi1_devdata *dd = ppd->dd;
9469 u64 guid;
9470 int ret;
9471
9472 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9473 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9474
9475 guid = ppd->guid;
9476 if (!guid) {
9477 if (dd->base_guid)
9478 guid = dd->base_guid + ppd->port - 1;
9479 ppd->guid = guid;
9480 }
9481
Mike Marciniszyn77241052015-07-30 15:17:43 -04009482 /* Set linkinit_reason on power up per OPA spec */
9483 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9484
Dean Luickbbdeb332015-12-01 15:38:15 -05009485 /* one-time init of the LCB */
9486 init_lcb(dd);
9487
Mike Marciniszyn77241052015-07-30 15:17:43 -04009488 if (loopback) {
9489 ret = init_loopback(dd);
9490 if (ret < 0)
9491 return ret;
9492 }
9493
Easwar Hariharan9775a992016-05-12 10:22:39 -07009494 get_port_type(ppd);
9495 if (ppd->port_type == PORT_TYPE_QSFP) {
9496 set_qsfp_int_n(ppd, 0);
9497 wait_for_qsfp_init(ppd);
9498 set_qsfp_int_n(ppd, 1);
9499 }
9500
9501 /*
9502 * Tune the SerDes to a ballpark setting for
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009503 * optimal signal and bit error rate
9504 * Needs to be done before starting the link
9505 */
9506 tune_serdes(ppd);
9507
Mike Marciniszyn77241052015-07-30 15:17:43 -04009508 return start_link(ppd);
9509}
9510
9511void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9512{
9513 struct hfi1_devdata *dd = ppd->dd;
9514
9515 /*
9516	 * Shut down the link and keep it down. First clear the flag saying
9517	 * the driver wants to allow the link to be up (driver_link_ready).
9518 * Then make sure the link is not automatically restarted
9519 * (link_enabled). Cancel any pending restart. And finally
9520 * go offline.
9521 */
9522 ppd->driver_link_ready = 0;
9523 ppd->link_enabled = 0;
9524
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009525 ppd->offline_disabled_reason =
9526 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009527 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009528 OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009529 set_link_state(ppd, HLS_DN_OFFLINE);
9530
9531 /* disable the port */
9532 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9533}
9534
9535static inline int init_cpu_counters(struct hfi1_devdata *dd)
9536{
9537 struct hfi1_pportdata *ppd;
9538 int i;
9539
9540 ppd = (struct hfi1_pportdata *)(dd + 1);
9541 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009542 ppd->ibport_data.rvp.rc_acks = NULL;
9543 ppd->ibport_data.rvp.rc_qacks = NULL;
9544 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9545 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9546 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9547 if (!ppd->ibport_data.rvp.rc_acks ||
9548 !ppd->ibport_data.rvp.rc_delayed_comp ||
9549 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009550 return -ENOMEM;
9551 }
9552
9553 return 0;
9554}
9555
9556static const char * const pt_names[] = {
9557 "expected",
9558 "eager",
9559 "invalid"
9560};
9561
9562static const char *pt_name(u32 type)
9563{
9564 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9565}
9566
9567/*
9568 * index is the index into the receive array
9569 */
9570void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9571 u32 type, unsigned long pa, u16 order)
9572{
9573 u64 reg;
9574 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9575 (dd->kregbase + RCV_ARRAY));
9576
9577 if (!(dd->flags & HFI1_PRESENT))
9578 goto done;
9579
9580 if (type == PT_INVALID) {
9581 pa = 0;
9582 } else if (type > PT_INVALID) {
9583 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009584 "unexpected receive array type %u for index %u, not handled\n",
9585 type, index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009586 goto done;
9587 }
9588
9589 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9590 pt_name(type), index, pa, (unsigned long)order);
9591
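	/*
	 * A RcvArray entry packs a write-enable bit, the buffer size order,
	 * and the 4 KB-aligned physical address of the buffer into one
	 * 64-bit register, written below at base + index * 8.
	 */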
9592#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9593 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9594 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9595 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9596 << RCV_ARRAY_RT_ADDR_SHIFT;
9597 writeq(reg, base + (index * 8));
9598
9599 if (type == PT_EAGER)
9600 /*
9601 * Eager entries are written one-by-one so we have to push them
9602 * after we write the entry.
9603 */
9604 flush_wc();
9605done:
9606 return;
9607}
9608
9609void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9610{
9611 struct hfi1_devdata *dd = rcd->dd;
9612 u32 i;
9613
9614 /* this could be optimized */
9615 for (i = rcd->eager_base; i < rcd->eager_base +
9616 rcd->egrbufs.alloced; i++)
9617 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9618
9619 for (i = rcd->expected_base;
9620 i < rcd->expected_base + rcd->expected_count; i++)
9621 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9622}
9623
9624int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9625 struct hfi1_ctxt_info *kinfo)
9626{
9627 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9628 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9629 return 0;
9630}
9631
9632struct hfi1_message_header *hfi1_get_msgheader(
9633 struct hfi1_devdata *dd, __le32 *rhf_addr)
9634{
9635 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9636
9637 return (struct hfi1_message_header *)
9638 (rhf_addr - dd->rhf_offset + offset);
9639}
9640
9641static const char * const ib_cfg_name_strings[] = {
9642 "HFI1_IB_CFG_LIDLMC",
9643 "HFI1_IB_CFG_LWID_DG_ENB",
9644 "HFI1_IB_CFG_LWID_ENB",
9645 "HFI1_IB_CFG_LWID",
9646 "HFI1_IB_CFG_SPD_ENB",
9647 "HFI1_IB_CFG_SPD",
9648 "HFI1_IB_CFG_RXPOL_ENB",
9649 "HFI1_IB_CFG_LREV_ENB",
9650 "HFI1_IB_CFG_LINKLATENCY",
9651 "HFI1_IB_CFG_HRTBT",
9652 "HFI1_IB_CFG_OP_VLS",
9653 "HFI1_IB_CFG_VL_HIGH_CAP",
9654 "HFI1_IB_CFG_VL_LOW_CAP",
9655 "HFI1_IB_CFG_OVERRUN_THRESH",
9656 "HFI1_IB_CFG_PHYERR_THRESH",
9657 "HFI1_IB_CFG_LINKDEFAULT",
9658 "HFI1_IB_CFG_PKEYS",
9659 "HFI1_IB_CFG_MTU",
9660 "HFI1_IB_CFG_LSTATE",
9661 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9662 "HFI1_IB_CFG_PMA_TICKS",
9663 "HFI1_IB_CFG_PORT"
9664};
9665
9666static const char *ib_cfg_name(int which)
9667{
9668 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9669 return "invalid";
9670 return ib_cfg_name_strings[which];
9671}
9672
9673int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9674{
9675 struct hfi1_devdata *dd = ppd->dd;
9676 int val = 0;
9677
9678 switch (which) {
9679 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9680 val = ppd->link_width_enabled;
9681 break;
9682 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9683 val = ppd->link_width_active;
9684 break;
9685 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9686 val = ppd->link_speed_enabled;
9687 break;
9688 case HFI1_IB_CFG_SPD: /* current Link speed */
9689 val = ppd->link_speed_active;
9690 break;
9691
9692 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9693 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9694 case HFI1_IB_CFG_LINKLATENCY:
9695 goto unimplemented;
9696
9697 case HFI1_IB_CFG_OP_VLS:
9698 val = ppd->vls_operational;
9699 break;
9700 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9701 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9702 break;
9703 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9704 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9705 break;
9706 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9707 val = ppd->overrun_threshold;
9708 break;
9709 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9710 val = ppd->phy_error_threshold;
9711 break;
9712 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9713 val = dd->link_default;
9714 break;
9715
9716 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9717 case HFI1_IB_CFG_PMA_TICKS:
9718 default:
9719unimplemented:
9720 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9721 dd_dev_info(
9722 dd,
9723 "%s: which %s: not implemented\n",
9724 __func__,
9725 ib_cfg_name(which));
9726 break;
9727 }
9728
9729 return val;
9730}
9731
9732/*
9733 * The largest MAD packet size.
9734 */
9735#define MAX_MAD_PACKET 2048
9736
9737/*
9738 * Return the maximum header bytes that can go on the _wire_
9739 * for this device. This count includes the ICRC, which is
9740 * not part of the packet held in memory but is appended
9741 * by the HW.
9742 * This is dependent on the device's receive header entry size.
9743 * HFI allows this to be set per-receive context, but the
9744 * driver presently enforces a global value.
9745 */
9746u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9747{
9748 /*
9749 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9750 * the Receive Header Entry Size minus the PBC (or RHF) size
9751 * plus one DW for the ICRC appended by HW.
9752 *
9753 * dd->rcd[0].rcvhdrqentsize is in DW.
9754	 * We use rcd[0] as all contexts will have the same value. Also,
9755 * the first kernel context would have been allocated by now so
9756 * we are guaranteed a valid value.
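	 *
	 * For example (hypothetical value), an entry size of 32 DW gives
	 * (32 - 2 + 1) * 4 = 124 bytes.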
9757 */
9758 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9759}
9760
9761/*
9762 * Set Send Length
9763 * @ppd - per port data
9764 *
9765 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9766 * registers compare against LRH.PktLen, so use the max bytes included
9767 * in the LRH.
9768 *
9769 * This routine changes all VL values except VL15, which it maintains at
9770 * the same value.
9771 */
9772static void set_send_length(struct hfi1_pportdata *ppd)
9773{
9774 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009775 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9776 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009777 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9778 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9779 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
Jubin Johnb4ba6632016-06-09 07:51:08 -07009780 int i, j;
Jianxin Xiong44306f12016-04-12 11:30:28 -07009781 u32 thres;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009782
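	/*
	 * VLs 0-3 are packed into SEND_LEN_CHECK0 and VLs 4-7 into
	 * SEND_LEN_CHECK1 (which also holds VL15, set above), each field
	 * being the DW count of the MTU plus the maximum header bytes.
	 */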
9783 for (i = 0; i < ppd->vls_supported; i++) {
9784 if (dd->vld[i].mtu > maxvlmtu)
9785 maxvlmtu = dd->vld[i].mtu;
9786 if (i <= 3)
9787 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9788 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9789 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9790 else
9791 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9792 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9793 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9794 }
9795 write_csr(dd, SEND_LEN_CHECK0, len1);
9796 write_csr(dd, SEND_LEN_CHECK1, len2);
9797 /* adjust kernel credit return thresholds based on new MTUs */
9798 /* all kernel receive contexts have the same hdrqentsize */
9799 for (i = 0; i < ppd->vls_supported; i++) {
Jianxin Xiong44306f12016-04-12 11:30:28 -07009800 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9801 sc_mtu_to_threshold(dd->vld[i].sc,
9802 dd->vld[i].mtu,
Jubin John17fb4f22016-02-14 20:21:52 -08009803 dd->rcd[0]->rcvhdrqentsize));
Jubin Johnb4ba6632016-06-09 07:51:08 -07009804 for (j = 0; j < INIT_SC_PER_VL; j++)
9805 sc_set_cr_threshold(
9806 pio_select_send_context_vl(dd, j, i),
9807 thres);
Jianxin Xiong44306f12016-04-12 11:30:28 -07009808 }
9809 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9810 sc_mtu_to_threshold(dd->vld[15].sc,
9811 dd->vld[15].mtu,
9812 dd->rcd[0]->rcvhdrqentsize));
9813 sc_set_cr_threshold(dd->vld[15].sc, thres);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009814
9815 /* Adjust maximum MTU for the port in DC */
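	/*
	 * The cap is encoded as ilog2(MTU / 256) + 1 for the standard sizes
	 * (for example, 2048 encodes as 4); 10240 has its own encoding.
	 */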
9816 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9817 (ilog2(maxvlmtu >> 8) + 1);
9818 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9819 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9820 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9821 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9822 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9823}
9824
9825static void set_lidlmc(struct hfi1_pportdata *ppd)
9826{
9827 int i;
9828 u64 sreg = 0;
9829 struct hfi1_devdata *dd = ppd->dd;
9830 u32 mask = ~((1U << ppd->lmc) - 1);
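	/* e.g. an LMC of 2 gives mask 0xfffffffc: the low 2 LID bits are
	 * wildcarded in the DLID/SLID checks programmed below
	 */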
9831 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9832
9833 if (dd->hfi1_snoop.mode_flag)
9834 dd_dev_info(dd, "Set lid/lmc while snooping");
9835
9836 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9837 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9838 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
Jubin John8638b772016-02-14 20:19:24 -08009839 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
Mike Marciniszyn77241052015-07-30 15:17:43 -04009840 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9841 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9842 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9843
9844 /*
9845 * Iterate over all the send contexts and set their SLID check
9846 */
9847 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9848 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9849 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9850 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9851
9852 for (i = 0; i < dd->chip_send_contexts; i++) {
9853 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9854 i, (u32)sreg);
9855 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9856 }
9857
9858 /* Now we have to do the same thing for the sdma engines */
9859 sdma_update_lmc(dd, mask, ppd->lid);
9860}
9861
9862static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9863{
9864 unsigned long timeout;
9865 u32 curr_state;
9866
9867 timeout = jiffies + msecs_to_jiffies(msecs);
9868 while (1) {
9869 curr_state = read_physical_state(dd);
9870 if (curr_state == state)
9871 break;
9872 if (time_after(jiffies, timeout)) {
9873 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009874 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9875 state, curr_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009876 return -ETIMEDOUT;
9877 }
9878 usleep_range(1950, 2050); /* sleep 2ms-ish */
9879 }
9880
9881 return 0;
9882}
9883
9884/*
9885 * Helper for set_link_state(). Do not call except from that routine.
9886 * Expects ppd->hls_mutex to be held.
9887 *
9888 * @rem_reason value to be sent to the neighbor
9889 *
9890 * LinkDownReasons only set if transition succeeds.
9891 */
9892static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9893{
9894 struct hfi1_devdata *dd = ppd->dd;
9895 u32 pstate, previous_state;
9896 u32 last_local_state;
9897 u32 last_remote_state;
9898 int ret;
9899 int do_transition;
9900 int do_wait;
9901
9902 previous_state = ppd->host_link_state;
9903 ppd->host_link_state = HLS_GOING_OFFLINE;
9904 pstate = read_physical_state(dd);
9905 if (pstate == PLS_OFFLINE) {
9906 do_transition = 0; /* in right state */
9907 do_wait = 0; /* ...no need to wait */
9908 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9909 do_transition = 0; /* in an offline transient state */
9910 do_wait = 1; /* ...wait for it to settle */
9911 } else {
9912 do_transition = 1; /* need to move to offline */
9913 do_wait = 1; /* ...will need to wait */
9914 }
9915
9916 if (do_transition) {
9917 ret = set_physical_link_state(dd,
Harish Chegondibf640092016-03-05 08:49:29 -08009918 (rem_reason << 8) | PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009919
9920 if (ret != HCMD_SUCCESS) {
9921 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009922 "Failed to transition to Offline link state, return %d\n",
9923 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009924 return -EINVAL;
9925 }
Bryan Morgana9c05e32016-02-03 14:30:49 -08009926 if (ppd->offline_disabled_reason ==
9927 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -04009928 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -08009929 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009930 }
9931
9932 if (do_wait) {
9933 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -04009934 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009935 if (ret < 0)
9936 return ret;
9937 }
9938
9939 /* make sure the logical state is also down */
9940 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9941
9942 /*
9943 * Now in charge of LCB - must be after the physical state is
9944 * offline.quiet and before host_link_state is changed.
9945 */
9946 set_host_lcb_access(dd);
9947 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9948 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9949
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009950 if (ppd->port_type == PORT_TYPE_QSFP &&
9951 ppd->qsfp_info.limiting_active &&
9952 qsfp_mod_present(ppd)) {
Dean Luick765a6fa2016-03-05 08:50:06 -08009953 int ret;
9954
9955 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
9956 if (ret == 0) {
9957 set_qsfp_tx(ppd, 0);
9958 release_chip_resource(dd, qsfp_resource(dd));
9959 } else {
9960 /* not fatal, but should warn */
9961 dd_dev_err(dd,
9962 "Unable to acquire lock to turn off QSFP TX\n");
9963 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009964 }
9965
Mike Marciniszyn77241052015-07-30 15:17:43 -04009966 /*
9967 * The LNI has a mandatory wait time after the physical state
9968 * moves to Offline.Quiet. The wait time may be different
9969 * depending on how the link went down. The 8051 firmware
9970 * will observe the needed wait time and only move to ready
9971 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -05009972 * is 6s, so wait that long and then at least 0.5s more for
9973 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009974 */
Dean Luick05087f3b2015-12-01 15:38:16 -05009975 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009976 if (ret) {
9977 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009978 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009979 /* state is really offline, so make it so */
9980 ppd->host_link_state = HLS_DN_OFFLINE;
9981 return ret;
9982 }
9983
9984 /*
9985 * The state is now offline and the 8051 is ready to accept host
9986 * requests.
9987 * - change our state
9988 * - notify others if we were previously in a linkup state
9989 */
9990 ppd->host_link_state = HLS_DN_OFFLINE;
9991 if (previous_state & HLS_UP) {
9992 /* went down while link was up */
9993 handle_linkup_change(dd, 0);
9994 } else if (previous_state
9995 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9996 /* went down while attempting link up */
9997 /* byte 1 of last_*_state is the failure reason */
9998 read_last_local_state(dd, &last_local_state);
9999 read_last_remote_state(dd, &last_remote_state);
10000 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010001 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
10002 last_local_state, last_remote_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010003 }
10004
10005 /* the active link width (downgrade) is 0 on link down */
10006 ppd->link_width_active = 0;
10007 ppd->link_width_downgrade_tx_active = 0;
10008 ppd->link_width_downgrade_rx_active = 0;
10009 ppd->current_egress_rate = 0;
10010 return 0;
10011}
10012
10013/* return the link state name */
10014static const char *link_state_name(u32 state)
10015{
10016 const char *name;
10017 int n = ilog2(state);
10018 static const char * const names[] = {
10019 [__HLS_UP_INIT_BP] = "INIT",
10020 [__HLS_UP_ARMED_BP] = "ARMED",
10021 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10022 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10023 [__HLS_DN_POLL_BP] = "POLL",
10024 [__HLS_DN_DISABLE_BP] = "DISABLE",
10025 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10026 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10027 [__HLS_GOING_UP_BP] = "GOING_UP",
10028 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10029 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10030 };
10031
10032 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10033 return name ? name : "unknown";
10034}
10035
10036/* return the link state reason name */
10037static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10038{
10039 if (state == HLS_UP_INIT) {
10040 switch (ppd->linkinit_reason) {
10041 case OPA_LINKINIT_REASON_LINKUP:
10042 return "(LINKUP)";
10043 case OPA_LINKINIT_REASON_FLAPPING:
10044 return "(FLAPPING)";
10045 case OPA_LINKINIT_OUTSIDE_POLICY:
10046 return "(OUTSIDE_POLICY)";
10047 case OPA_LINKINIT_QUARANTINED:
10048 return "(QUARANTINED)";
10049 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10050 return "(INSUFIC_CAPABILITY)";
10051 default:
10052 break;
10053 }
10054 }
10055 return "";
10056}
10057
10058/*
10059 * driver_physical_state - convert the driver's notion of a port's
10060 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10061 * Return -1 (converted to a u32) to indicate error.
10062 */
10063u32 driver_physical_state(struct hfi1_pportdata *ppd)
10064{
10065 switch (ppd->host_link_state) {
10066 case HLS_UP_INIT:
10067 case HLS_UP_ARMED:
10068 case HLS_UP_ACTIVE:
10069 return IB_PORTPHYSSTATE_LINKUP;
10070 case HLS_DN_POLL:
10071 return IB_PORTPHYSSTATE_POLLING;
10072 case HLS_DN_DISABLE:
10073 return IB_PORTPHYSSTATE_DISABLED;
10074 case HLS_DN_OFFLINE:
10075 return OPA_PORTPHYSSTATE_OFFLINE;
10076 case HLS_VERIFY_CAP:
10077 return IB_PORTPHYSSTATE_POLLING;
10078 case HLS_GOING_UP:
10079 return IB_PORTPHYSSTATE_POLLING;
10080 case HLS_GOING_OFFLINE:
10081 return OPA_PORTPHYSSTATE_OFFLINE;
10082 case HLS_LINK_COOLDOWN:
10083 return OPA_PORTPHYSSTATE_OFFLINE;
10084 case HLS_DN_DOWNDEF:
10085 default:
10086 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10087 ppd->host_link_state);
10088 return -1;
10089 }
10090}
10091
10092/*
10093 * driver_logical_state - convert the driver's notion of a port's
10094 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10095 * (converted to a u32) to indicate error.
10096 */
10097u32 driver_logical_state(struct hfi1_pportdata *ppd)
10098{
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -070010099 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010100 return IB_PORT_DOWN;
10101
10102 switch (ppd->host_link_state & HLS_UP) {
10103 case HLS_UP_INIT:
10104 return IB_PORT_INIT;
10105 case HLS_UP_ARMED:
10106 return IB_PORT_ARMED;
10107 case HLS_UP_ACTIVE:
10108 return IB_PORT_ACTIVE;
10109 default:
10110 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10111 ppd->host_link_state);
10112 return -1;
10113 }
10114}
10115
10116void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10117 u8 neigh_reason, u8 rem_reason)
10118{
10119 if (ppd->local_link_down_reason.latest == 0 &&
10120 ppd->neigh_link_down_reason.latest == 0) {
10121 ppd->local_link_down_reason.latest = lcl_reason;
10122 ppd->neigh_link_down_reason.latest = neigh_reason;
10123 ppd->remote_link_down_reason = rem_reason;
10124 }
10125}
10126
10127/*
10128 * Change the physical and/or logical link state.
10129 *
10130 * Do not call this routine while inside an interrupt. It contains
10131 * calls to routines that can take multiple seconds to finish.
10132 *
10133 * Returns 0 on success, -errno on failure.
10134 */
10135int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10136{
10137 struct hfi1_devdata *dd = ppd->dd;
10138 struct ib_event event = {.device = NULL};
10139 int ret1, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010140 int orig_new_state, poll_bounce;
10141
10142 mutex_lock(&ppd->hls_lock);
10143
10144 orig_new_state = state;
10145 if (state == HLS_DN_DOWNDEF)
10146 state = dd->link_default;
10147
10148 /* interpret poll -> poll as a link bounce */
Jubin Johnd0d236e2016-02-14 20:20:15 -080010149 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10150 state == HLS_DN_POLL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010151
10152 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -080010153 link_state_name(ppd->host_link_state),
10154 link_state_name(orig_new_state),
10155 poll_bounce ? "(bounce) " : "",
10156 link_state_reason_name(ppd, state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010157
Mike Marciniszyn77241052015-07-30 15:17:43 -040010158 /*
10159 * If we're going to a (HLS_*) link state that implies the logical
10160	 * link state is neither IB_PORT_ARMED nor IB_PORT_ACTIVE, then
10161 * reset is_sm_config_started to 0.
10162 */
10163 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10164 ppd->is_sm_config_started = 0;
10165
10166 /*
10167	 * Do nothing if the states match. Let a poll-to-poll link bounce
10168 * go through.
10169 */
10170 if (ppd->host_link_state == state && !poll_bounce)
10171 goto done;
10172
10173 switch (state) {
10174 case HLS_UP_INIT:
Jubin Johnd0d236e2016-02-14 20:20:15 -080010175 if (ppd->host_link_state == HLS_DN_POLL &&
10176 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010177 /*
10178 * Quick link up jumps from polling to here.
10179 *
10180 * Whether in normal or loopback mode, the
10181 * simulator jumps from polling to link up.
10182 * Accept that here.
10183 */
Jubin John17fb4f22016-02-14 20:21:52 -080010184 /* OK */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010185 } else if (ppd->host_link_state != HLS_GOING_UP) {
10186 goto unexpected;
10187 }
10188
10189 ppd->host_link_state = HLS_UP_INIT;
10190 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10191 if (ret) {
10192 /* logical state didn't change, stay at going_up */
10193 ppd->host_link_state = HLS_GOING_UP;
10194 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010195 "%s: logical state did not change to INIT\n",
10196 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010197 } else {
10198 /* clear old transient LINKINIT_REASON code */
10199 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10200 ppd->linkinit_reason =
10201 OPA_LINKINIT_REASON_LINKUP;
10202
10203 /* enable the port */
10204 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10205
10206 handle_linkup_change(dd, 1);
10207 }
10208 break;
10209 case HLS_UP_ARMED:
10210 if (ppd->host_link_state != HLS_UP_INIT)
10211 goto unexpected;
10212
10213 ppd->host_link_state = HLS_UP_ARMED;
10214 set_logical_state(dd, LSTATE_ARMED);
10215 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10216 if (ret) {
10217 /* logical state didn't change, stay at init */
10218 ppd->host_link_state = HLS_UP_INIT;
10219 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010220 "%s: logical state did not change to ARMED\n",
10221 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010222 }
10223 /*
10224 * The simulator does not currently implement SMA messages,
10225 * so neighbor_normal is not set. Set it here when we first
10226 * move to Armed.
10227 */
10228 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10229 ppd->neighbor_normal = 1;
10230 break;
10231 case HLS_UP_ACTIVE:
10232 if (ppd->host_link_state != HLS_UP_ARMED)
10233 goto unexpected;
10234
10235 ppd->host_link_state = HLS_UP_ACTIVE;
10236 set_logical_state(dd, LSTATE_ACTIVE);
10237 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10238 if (ret) {
10239 /* logical state didn't change, stay at armed */
10240 ppd->host_link_state = HLS_UP_ARMED;
10241 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010242 "%s: logical state did not change to ACTIVE\n",
10243 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010244 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010245 /* tell all engines to go running */
10246 sdma_all_running(dd);
10247
10248			/* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010249 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010250 event.element.port_num = ppd->port;
10251 event.event = IB_EVENT_PORT_ACTIVE;
10252 }
10253 break;
10254 case HLS_DN_POLL:
10255 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10256 ppd->host_link_state == HLS_DN_OFFLINE) &&
10257 dd->dc_shutdown)
10258 dc_start(dd);
10259 /* Hand LED control to the DC */
10260 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10261
10262 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10263 u8 tmp = ppd->link_enabled;
10264
10265 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10266 if (ret) {
10267 ppd->link_enabled = tmp;
10268 break;
10269 }
10270 ppd->remote_link_down_reason = 0;
10271
10272 if (ppd->driver_link_ready)
10273 ppd->link_enabled = 1;
10274 }
10275
Jim Snowfb9036d2016-01-11 18:32:21 -050010276 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010277 ret = set_local_link_attributes(ppd);
10278 if (ret)
10279 break;
10280
10281 ppd->port_error_action = 0;
10282 ppd->host_link_state = HLS_DN_POLL;
10283
10284 if (quick_linkup) {
10285 /* quick linkup does not go into polling */
10286 ret = do_quick_linkup(dd);
10287 } else {
10288 ret1 = set_physical_link_state(dd, PLS_POLLING);
10289 if (ret1 != HCMD_SUCCESS) {
10290 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010291 "Failed to transition to Polling link state, return 0x%x\n",
10292 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010293 ret = -EINVAL;
10294 }
10295 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010296 ppd->offline_disabled_reason =
10297 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010298 /*
10299 * If an error occurred above, go back to offline. The
10300 * caller may reschedule another attempt.
10301 */
10302 if (ret)
10303 goto_offline(ppd, 0);
10304 break;
10305 case HLS_DN_DISABLE:
10306 /* link is disabled */
10307 ppd->link_enabled = 0;
10308
10309 /* allow any state to transition to disabled */
10310
10311 /* must transition to offline first */
10312 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10313 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10314 if (ret)
10315 break;
10316 ppd->remote_link_down_reason = 0;
10317 }
10318
10319 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10320 if (ret1 != HCMD_SUCCESS) {
10321 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010322 "Failed to transition to Disabled link state, return 0x%x\n",
10323 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010324 ret = -EINVAL;
10325 break;
10326 }
10327 ppd->host_link_state = HLS_DN_DISABLE;
10328 dc_shutdown(dd);
10329 break;
10330 case HLS_DN_OFFLINE:
10331 if (ppd->host_link_state == HLS_DN_DISABLE)
10332 dc_start(dd);
10333
10334 /* allow any state to transition to offline */
10335 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10336 if (!ret)
10337 ppd->remote_link_down_reason = 0;
10338 break;
10339 case HLS_VERIFY_CAP:
10340 if (ppd->host_link_state != HLS_DN_POLL)
10341 goto unexpected;
10342 ppd->host_link_state = HLS_VERIFY_CAP;
10343 break;
10344 case HLS_GOING_UP:
10345 if (ppd->host_link_state != HLS_VERIFY_CAP)
10346 goto unexpected;
10347
10348 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10349 if (ret1 != HCMD_SUCCESS) {
10350 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010351 "Failed to transition to link up state, return 0x%x\n",
10352 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010353 ret = -EINVAL;
10354 break;
10355 }
10356 ppd->host_link_state = HLS_GOING_UP;
10357 break;
10358
10359 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10360 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10361 default:
10362 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010363 __func__, state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010364 ret = -EINVAL;
10365 break;
10366 }
10367
Mike Marciniszyn77241052015-07-30 15:17:43 -040010368 goto done;
10369
10370unexpected:
10371 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010372 __func__, link_state_name(ppd->host_link_state),
10373 link_state_name(state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010374 ret = -EINVAL;
10375
10376done:
10377 mutex_unlock(&ppd->hls_lock);
10378
10379 if (event.device)
10380 ib_dispatch_event(&event);
10381
10382 return ret;
10383}
10384
10385int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10386{
10387 u64 reg;
10388 int ret = 0;
10389
10390 switch (which) {
10391 case HFI1_IB_CFG_LIDLMC:
10392 set_lidlmc(ppd);
10393 break;
10394 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10395 /*
10396 * The VL Arbitrator high limit is sent in units of 4k
10397 * bytes, while HFI stores it in units of 64 bytes.
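		 * For example, a limit of 1 (4 KB) becomes 64 units of 64 bytes.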
10398 */
Jubin John8638b772016-02-14 20:19:24 -080010399 val *= 4096 / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010400 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10401 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10402 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10403 break;
10404 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10405 /* HFI only supports POLL as the default link down state */
10406 if (val != HLS_DN_POLL)
10407 ret = -EINVAL;
10408 break;
10409 case HFI1_IB_CFG_OP_VLS:
10410 if (ppd->vls_operational != val) {
10411 ppd->vls_operational = val;
10412 if (!ppd->port)
10413 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010414 }
10415 break;
10416 /*
10417 * For link width, link width downgrade, and speed enable, always AND
10418 * the setting with what is actually supported. This has two benefits.
10419 * First, enabled can't have unsupported values, no matter what the
10420 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10421 * "fill in with your supported value" have all the bits in the
10422 * field set, so simply ANDing with supported has the desired result.
10423 */
10424 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10425 ppd->link_width_enabled = val & ppd->link_width_supported;
10426 break;
10427 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10428 ppd->link_width_downgrade_enabled =
10429 val & ppd->link_width_downgrade_supported;
10430 break;
10431 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10432 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10433 break;
10434 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10435 /*
10436		 * HFI does not follow IB specs; save this value
10437		 * so we can report it if asked.
10438 */
10439 ppd->overrun_threshold = val;
10440 break;
10441 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10442 /*
10443		 * HFI does not follow IB specs; save this value
10444		 * so we can report it if asked.
10445 */
10446 ppd->phy_error_threshold = val;
10447 break;
10448
10449 case HFI1_IB_CFG_MTU:
10450 set_send_length(ppd);
10451 break;
10452
10453 case HFI1_IB_CFG_PKEYS:
10454 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10455 set_partition_keys(ppd);
10456 break;
10457
10458 default:
10459 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10460 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010461 "%s: which %s, val 0x%x: not implemented\n",
10462 __func__, ib_cfg_name(which), val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010463 break;
10464 }
10465 return ret;
10466}
10467
10468/* begin functions related to vl arbitration table caching */
10469static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10470{
10471 int i;
10472
10473 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10474 VL_ARB_LOW_PRIO_TABLE_SIZE);
10475 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10476 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10477
10478 /*
10479 * Note that we always return values directly from the
10480 * 'vl_arb_cache' (and do no CSR reads) in response to a
10481 * 'Get(VLArbTable)'. This is obviously correct after a
10482 * 'Set(VLArbTable)', since the cache will then be up to
10483 * date. But it's also correct prior to any 'Set(VLArbTable)'
10484 * since then both the cache, and the relevant h/w registers
10485 * will be zeroed.
10486 */
10487
10488 for (i = 0; i < MAX_PRIO_TABLE; i++)
10489 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10490}
10491
10492/*
10493 * vl_arb_lock_cache
10494 *
10495 * All other vl_arb_* functions should be called only after locking
10496 * the cache.
10497 */
10498static inline struct vl_arb_cache *
10499vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10500{
10501 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10502 return NULL;
10503 spin_lock(&ppd->vl_arb_cache[idx].lock);
10504 return &ppd->vl_arb_cache[idx];
10505}
10506
10507static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10508{
10509 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10510}
10511
10512static void vl_arb_get_cache(struct vl_arb_cache *cache,
10513 struct ib_vl_weight_elem *vl)
10514{
10515 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10516}
10517
10518static void vl_arb_set_cache(struct vl_arb_cache *cache,
10519 struct ib_vl_weight_elem *vl)
10520{
10521 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10522}
10523
10524static int vl_arb_match_cache(struct vl_arb_cache *cache,
10525 struct ib_vl_weight_elem *vl)
10526{
10527 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10528}
Jubin Johnf4d507c2016-02-14 20:20:25 -080010529
Mike Marciniszyn77241052015-07-30 15:17:43 -040010530/* end functions related to vl arbitration table caching */
10531
10532static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10533 u32 size, struct ib_vl_weight_elem *vl)
10534{
10535 struct hfi1_devdata *dd = ppd->dd;
10536 u64 reg;
10537 unsigned int i, is_up = 0;
10538 int drain, ret = 0;
10539
10540 mutex_lock(&ppd->hls_lock);
10541
10542 if (ppd->host_link_state & HLS_UP)
10543 is_up = 1;
10544
10545 drain = !is_ax(dd) && is_up;
10546
10547 if (drain)
10548 /*
10549 * Before adjusting VL arbitration weights, empty per-VL
10550 * FIFOs, otherwise a packet whose VL weight is being
10551 * set to 0 could get stuck in a FIFO with no chance to
10552 * egress.
10553 */
10554 ret = stop_drain_data_vls(dd);
10555
10556 if (ret) {
10557 dd_dev_err(
10558 dd,
10559 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10560 __func__);
10561 goto err;
10562 }
10563
10564 for (i = 0; i < size; i++, vl++) {
10565 /*
10566 * NOTE: The low priority shift and mask are used here, but
10567 * they are the same for both the low and high registers.
10568 */
10569 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10570 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10571 | (((u64)vl->weight
10572 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10573 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10574 write_csr(dd, target + (i * 8), reg);
10575 }
10576 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10577
10578 if (drain)
10579 open_fill_data_vls(dd); /* reopen all VLs */
10580
10581err:
10582 mutex_unlock(&ppd->hls_lock);
10583
10584 return ret;
10585}
10586
10587/*
10588 * Read one credit merge VL register.
10589 */
10590static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10591 struct vl_limit *vll)
10592{
10593 u64 reg = read_csr(dd, csr);
10594
10595 vll->dedicated = cpu_to_be16(
10596 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10597 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10598 vll->shared = cpu_to_be16(
10599 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10600 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10601}
10602
10603/*
10604 * Read the current credit merge limits.
10605 */
10606static int get_buffer_control(struct hfi1_devdata *dd,
10607 struct buffer_control *bc, u16 *overall_limit)
10608{
10609 u64 reg;
10610 int i;
10611
10612 /* not all entries are filled in */
10613 memset(bc, 0, sizeof(*bc));
10614
10615 /* OPA and HFI have a 1-1 mapping */
10616 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080010617 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010618
10619 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10620 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10621
10622 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10623 bc->overall_shared_limit = cpu_to_be16(
10624 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10625 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10626 if (overall_limit)
10627 *overall_limit = (reg
10628 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10629 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10630 return sizeof(struct buffer_control);
10631}
10632
10633static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10634{
10635 u64 reg;
10636 int i;
10637
10638 /* each register contains 16 SC->VLnt mappings, 4 bits each */
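	/* byte i holds SC 2i in its low nibble and SC 2i+1 in its high nibble */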
10639 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10640 for (i = 0; i < sizeof(u64); i++) {
10641 u8 byte = *(((u8 *)&reg) + i);
10642
10643 dp->vlnt[2 * i] = byte & 0xf;
10644 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10645 }
10646
10647 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10648 for (i = 0; i < sizeof(u64); i++) {
10649 u8 byte = *(((u8 *)&reg) + i);
10650
10651 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10652 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10653 }
10654 return sizeof(struct sc2vlnt);
10655}
10656
10657static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10658 struct ib_vl_weight_elem *vl)
10659{
10660 unsigned int i;
10661
10662 for (i = 0; i < nelems; i++, vl++) {
10663 vl->vl = 0xf;
10664 vl->weight = 0;
10665 }
10666}
10667
10668static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10669{
10670 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
Jubin John17fb4f22016-02-14 20:21:52 -080010671 DC_SC_VL_VAL(15_0,
10672 0, dp->vlnt[0] & 0xf,
10673 1, dp->vlnt[1] & 0xf,
10674 2, dp->vlnt[2] & 0xf,
10675 3, dp->vlnt[3] & 0xf,
10676 4, dp->vlnt[4] & 0xf,
10677 5, dp->vlnt[5] & 0xf,
10678 6, dp->vlnt[6] & 0xf,
10679 7, dp->vlnt[7] & 0xf,
10680 8, dp->vlnt[8] & 0xf,
10681 9, dp->vlnt[9] & 0xf,
10682 10, dp->vlnt[10] & 0xf,
10683 11, dp->vlnt[11] & 0xf,
10684 12, dp->vlnt[12] & 0xf,
10685 13, dp->vlnt[13] & 0xf,
10686 14, dp->vlnt[14] & 0xf,
10687 15, dp->vlnt[15] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010688 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
Jubin John17fb4f22016-02-14 20:21:52 -080010689 DC_SC_VL_VAL(31_16,
10690 16, dp->vlnt[16] & 0xf,
10691 17, dp->vlnt[17] & 0xf,
10692 18, dp->vlnt[18] & 0xf,
10693 19, dp->vlnt[19] & 0xf,
10694 20, dp->vlnt[20] & 0xf,
10695 21, dp->vlnt[21] & 0xf,
10696 22, dp->vlnt[22] & 0xf,
10697 23, dp->vlnt[23] & 0xf,
10698 24, dp->vlnt[24] & 0xf,
10699 25, dp->vlnt[25] & 0xf,
10700 26, dp->vlnt[26] & 0xf,
10701 27, dp->vlnt[27] & 0xf,
10702 28, dp->vlnt[28] & 0xf,
10703 29, dp->vlnt[29] & 0xf,
10704 30, dp->vlnt[30] & 0xf,
10705 31, dp->vlnt[31] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010706}
10707
10708static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10709 u16 limit)
10710{
10711 if (limit != 0)
10712 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010713 what, (int)limit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010714}
10715
10716/* change only the shared limit portion of SendCmGlobalCredit */
10717static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10718{
10719 u64 reg;
10720
10721 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10722 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10723 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10724 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10725}
10726
10727/* change only the total credit limit portion of SendCmGlobalCredit */
10728static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10729{
10730 u64 reg;
10731
10732 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10733 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10734 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10735 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10736}
10737
10738/* set the given per-VL shared limit */
10739static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10740{
10741 u64 reg;
10742 u32 addr;
10743
10744 if (vl < TXE_NUM_DATA_VL)
10745 addr = SEND_CM_CREDIT_VL + (8 * vl);
10746 else
10747 addr = SEND_CM_CREDIT_VL15;
10748
10749 reg = read_csr(dd, addr);
10750 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10751 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10752 write_csr(dd, addr, reg);
10753}
10754
10755/* set the given per-VL dedicated limit */
10756static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10757{
10758 u64 reg;
10759 u32 addr;
10760
10761 if (vl < TXE_NUM_DATA_VL)
10762 addr = SEND_CM_CREDIT_VL + (8 * vl);
10763 else
10764 addr = SEND_CM_CREDIT_VL15;
10765
10766 reg = read_csr(dd, addr);
10767 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10768 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10769 write_csr(dd, addr, reg);
10770}
10771
10772/* spin until the given per-VL status mask bits clear */
10773static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10774 const char *which)
10775{
10776 unsigned long timeout;
10777 u64 reg;
10778
10779 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10780 while (1) {
10781 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10782
10783 if (reg == 0)
10784 return; /* success */
10785 if (time_after(jiffies, timeout))
10786 break; /* timed out */
10787 udelay(1);
10788 }
10789
10790 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010791 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10792 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010793 /*
10794 * If this occurs, it is likely there was a credit loss on the link.
10795 * The only recovery from that is a link bounce.
10796 */
10797 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010798 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010799}
10800
10801/*
10802 * The number of credits on the VLs may be changed while everything
10803 * is "live", but the following algorithm must be followed due to
10804 * how the hardware is actually implemented. In particular,
10805 * Return_Credit_Status[] is the only correct status check.
10806 *
10807 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10808 * set Global_Shared_Credit_Limit = 0
10809 * use_all_vl = 1
10810 * mask0 = all VLs that are changing either dedicated or shared limits
10811 * set Shared_Limit[mask0] = 0
10812 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10813 * if (changing any dedicated limit)
10814 * mask1 = all VLs that are lowering dedicated limits
10815 * lower Dedicated_Limit[mask1]
10816 * spin until Return_Credit_Status[mask1] == 0
10817 * raise Dedicated_Limits
10818 * raise Shared_Limits
10819 * raise Global_Shared_Credit_Limit
10820 *
10821 * lower = if the new limit is lower, set the limit to the new value
10822 * raise = if the new limit is higher than the current value (may be changed
10823 * earlier in the algorithm), set the new limit to the new value
10824 */
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010825int set_buffer_control(struct hfi1_pportdata *ppd,
10826 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040010827{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010828 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010829 u64 changing_mask, ld_mask, stat_mask;
10830 int change_count;
10831 int i, use_all_mask;
10832 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010833 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010834 /*
10835 * A0: add the variable any_shared_limit_changing below and in the
10836 * algorithm above. If removing A0 support, it can be removed.
10837 */
10838 int any_shared_limit_changing;
10839 struct buffer_control cur_bc;
10840 u8 changing[OPA_MAX_VLS];
10841 u8 lowering_dedicated[OPA_MAX_VLS];
10842 u16 cur_total;
10843 u32 new_total = 0;
10844 const u64 all_mask =
10845 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10846 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10847 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10848 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10849 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10850 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10851 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10852 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10853 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10854
10855#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10856#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10857
Mike Marciniszyn77241052015-07-30 15:17:43 -040010858 /* find the new total credits, do sanity check on unused VLs */
10859 for (i = 0; i < OPA_MAX_VLS; i++) {
10860 if (valid_vl(i)) {
10861 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10862 continue;
10863 }
10864 nonzero_msg(dd, i, "dedicated",
Jubin John17fb4f22016-02-14 20:21:52 -080010865 be16_to_cpu(new_bc->vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010866 nonzero_msg(dd, i, "shared",
Jubin John17fb4f22016-02-14 20:21:52 -080010867 be16_to_cpu(new_bc->vl[i].shared));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010868 new_bc->vl[i].dedicated = 0;
10869 new_bc->vl[i].shared = 0;
10870 }
10871 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050010872
Mike Marciniszyn77241052015-07-30 15:17:43 -040010873 /* fetch the current values */
10874 get_buffer_control(dd, &cur_bc, &cur_total);
10875
10876 /*
10877 * Create the masks we will use.
10878 */
10879 memset(changing, 0, sizeof(changing));
10880 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
Jubin John4d114fd2016-02-14 20:21:43 -080010881 /*
10882 * NOTE: Assumes that the individual VL bits are adjacent and in
10883 * increasing order
10884 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010885 stat_mask =
10886 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10887 changing_mask = 0;
10888 ld_mask = 0;
10889 change_count = 0;
10890 any_shared_limit_changing = 0;
10891 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10892 if (!valid_vl(i))
10893 continue;
10894 this_shared_changing = new_bc->vl[i].shared
10895 != cur_bc.vl[i].shared;
10896 if (this_shared_changing)
10897 any_shared_limit_changing = 1;
Jubin Johnd0d236e2016-02-14 20:20:15 -080010898 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10899 this_shared_changing) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010900 changing[i] = 1;
10901 changing_mask |= stat_mask;
10902 change_count++;
10903 }
10904 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10905 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10906 lowering_dedicated[i] = 1;
10907 ld_mask |= stat_mask;
10908 }
10909 }
10910
10911 /* bracket the credit change with a total adjustment */
10912 if (new_total > cur_total)
10913 set_global_limit(dd, new_total);
10914
10915 /*
10916 * Start the credit change algorithm.
10917 */
10918 use_all_mask = 0;
10919 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010920 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10921 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010922 set_global_shared(dd, 0);
10923 cur_bc.overall_shared_limit = 0;
10924 use_all_mask = 1;
10925 }
10926
10927 for (i = 0; i < NUM_USABLE_VLS; i++) {
10928 if (!valid_vl(i))
10929 continue;
10930
10931 if (changing[i]) {
10932 set_vl_shared(dd, i, 0);
10933 cur_bc.vl[i].shared = 0;
10934 }
10935 }
10936
10937 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
Jubin John17fb4f22016-02-14 20:21:52 -080010938 "shared");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010939
10940 if (change_count > 0) {
10941 for (i = 0; i < NUM_USABLE_VLS; i++) {
10942 if (!valid_vl(i))
10943 continue;
10944
10945 if (lowering_dedicated[i]) {
10946 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080010947 be16_to_cpu(new_bc->
10948 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010949 cur_bc.vl[i].dedicated =
10950 new_bc->vl[i].dedicated;
10951 }
10952 }
10953
10954 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10955
10956 /* now raise all dedicated that are going up */
10957 for (i = 0; i < NUM_USABLE_VLS; i++) {
10958 if (!valid_vl(i))
10959 continue;
10960
10961 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10962 be16_to_cpu(cur_bc.vl[i].dedicated))
10963 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080010964 be16_to_cpu(new_bc->
10965 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010966 }
10967 }
10968
10969 /* next raise all shared that are going up */
10970 for (i = 0; i < NUM_USABLE_VLS; i++) {
10971 if (!valid_vl(i))
10972 continue;
10973
10974 if (be16_to_cpu(new_bc->vl[i].shared) >
10975 be16_to_cpu(cur_bc.vl[i].shared))
10976 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10977 }
10978
10979 /* finally raise the global shared */
10980 if (be16_to_cpu(new_bc->overall_shared_limit) >
Jubin John17fb4f22016-02-14 20:21:52 -080010981 be16_to_cpu(cur_bc.overall_shared_limit))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010982 set_global_shared(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010983 be16_to_cpu(new_bc->overall_shared_limit));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010984
10985 /* bracket the credit change with a total adjustment */
10986 if (new_total < cur_total)
10987 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010988
10989 /*
10990 * Determine the actual number of operational VLS using the number of
10991 * dedicated and shared credits for each VL.
10992 */
10993 if (change_count > 0) {
10994 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10995 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
10996 be16_to_cpu(new_bc->vl[i].shared) > 0)
10997 vl_count++;
10998 ppd->actual_vls_operational = vl_count;
10999 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11000 ppd->actual_vls_operational :
11001 ppd->vls_operational,
11002 NULL);
11003 if (ret == 0)
11004 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11005 ppd->actual_vls_operational :
11006 ppd->vls_operational, NULL);
11007 if (ret)
11008 return ret;
11009 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011010 return 0;
11011}
11012
11013/*
11014 * Read the given fabric manager table. Return the size of the
11015 * table (in bytes) on success, and a negative error code on
11016 * failure.
11017 */
11018int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11019
11020{
11021 int size;
11022 struct vl_arb_cache *vlc;
11023
11024 switch (which) {
11025 case FM_TBL_VL_HIGH_ARB:
11026 size = 256;
11027 /*
11028 * OPA specifies 128 elements (of 2 bytes each), though
11029 * HFI supports only 16 elements in h/w.
11030 */
11031 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11032 vl_arb_get_cache(vlc, t);
11033 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11034 break;
11035 case FM_TBL_VL_LOW_ARB:
11036 size = 256;
11037 /*
11038 * OPA specifies 128 elements (of 2 bytes each), though
11039 * HFI supports only 16 elements in h/w.
11040 */
11041 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11042 vl_arb_get_cache(vlc, t);
11043 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11044 break;
11045 case FM_TBL_BUFFER_CONTROL:
11046 size = get_buffer_control(ppd->dd, t, NULL);
11047 break;
11048 case FM_TBL_SC2VLNT:
11049 size = get_sc2vlnt(ppd->dd, t);
11050 break;
11051 case FM_TBL_VL_PREEMPT_ELEMS:
11052 size = 256;
11053 /* OPA specifies 128 elements, of 2 bytes each */
11054 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11055 break;
11056 case FM_TBL_VL_PREEMPT_MATRIX:
11057 size = 256;
11058 /*
11059 * OPA specifies that this is the same size as the VL
11060 * arbitration tables (i.e., 256 bytes).
11061 */
11062 break;
11063 default:
11064 return -EINVAL;
11065 }
11066 return size;
11067}
11068
11069/*
11070 * Write the given fabric manager table.
11071 */
11072int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11073{
11074 int ret = 0;
11075 struct vl_arb_cache *vlc;
11076
11077 switch (which) {
11078 case FM_TBL_VL_HIGH_ARB:
11079 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11080 if (vl_arb_match_cache(vlc, t)) {
11081 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11082 break;
11083 }
11084 vl_arb_set_cache(vlc, t);
11085 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11086 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11087 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11088 break;
11089 case FM_TBL_VL_LOW_ARB:
11090 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11091 if (vl_arb_match_cache(vlc, t)) {
11092 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11093 break;
11094 }
11095 vl_arb_set_cache(vlc, t);
11096 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11097 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11098 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11099 break;
11100 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011101 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011102 break;
11103 case FM_TBL_SC2VLNT:
11104 set_sc2vlnt(ppd->dd, t);
11105 break;
11106 default:
11107 ret = -EINVAL;
11108 }
11109 return ret;
11110}
11111
11112/*
11113 * Disable all data VLs.
11114 *
11115 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11116 */
11117static int disable_data_vls(struct hfi1_devdata *dd)
11118{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011119 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011120 return 1;
11121
11122 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11123
11124 return 0;
11125}
11126
11127/*
11128 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11129 * Just re-enables all data VLs (the "fill" part happens
11130 * automatically - the name was chosen for symmetry with
11131 * stop_drain_data_vls()).
11132 *
11133 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11134 */
11135int open_fill_data_vls(struct hfi1_devdata *dd)
11136{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011137 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011138 return 1;
11139
11140 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11141
11142 return 0;
11143}
11144
11145/*
11146 * drain_data_vls() - assumes that disable_data_vls() has been called;
11147 * waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
11148 * engines to drop to 0.
11149 */
11150static void drain_data_vls(struct hfi1_devdata *dd)
11151{
11152 sc_wait(dd);
11153 sdma_wait(dd);
11154 pause_for_credit_return(dd);
11155}
11156
11157/*
11158 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11159 *
11160 * Use open_fill_data_vls() to resume using data VLs. This pair is
11161 * meant to be used like this:
11162 *
11163 * stop_drain_data_vls(dd);
11164 * // do things with per-VL resources
11165 * open_fill_data_vls(dd);
11166 */
11167int stop_drain_data_vls(struct hfi1_devdata *dd)
11168{
11169 int ret;
11170
11171 ret = disable_data_vls(dd);
11172 if (ret == 0)
11173 drain_data_vls(dd);
11174
11175 return ret;
11176}
11177
11178/*
11179 * Convert a nanosecond time to a cclock count. No matter how slow
11180 * the cclock, a non-zero ns will always have a non-zero result.
11181 */
11182u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11183{
11184 u32 cclocks;
11185
11186 if (dd->icode == ICODE_FPGA_EMULATION)
11187 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11188 else /* simulation pretends to be ASIC */
11189 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11190 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11191 cclocks = 1;
11192 return cclocks;
11193}
11194
11195/*
11196 * Convert a cclock count to nanoseconds. No matter how slow
11197 * the cclock, a non-zero cclock count will always have a non-zero result.
11198 */
11199u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11200{
11201 u32 ns;
11202
11203 if (dd->icode == ICODE_FPGA_EMULATION)
11204 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11205 else /* simulation pretends to be ASIC */
11206 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11207 if (cclocks && !ns)
11208 ns = 1;
11209 return ns;
11210}
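/*
 * Illustrative example for the two conversions above, assuming a
 * hypothetical cclock period of 2000 ps (the actual FPGA_CCLOCK_PS and
 * ASIC_CCLOCK_PS values are defined elsewhere): ns_to_cclock(dd, 5000)
 * would return (5000 * 1000) / 2000 = 2500 cclocks, and
 * cclock_to_ns(dd, 1) would return (1 * 2000) / 1000 = 2 ns. The final
 * checks in each function guarantee that a non-zero input never rounds
 * down to a zero result.
 */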
11211
11212/*
11213 * Dynamically adjust the receive interrupt timeout for a context based on
11214 * incoming packet rate.
11215 *
11216 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11217 */
11218static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11219{
11220 struct hfi1_devdata *dd = rcd->dd;
11221 u32 timeout = rcd->rcvavail_timeout;
11222
11223 /*
11224 * This algorithm doubles or halves the timeout depending on whether
11225	 * the number of packets received in this interrupt was less than or
11226	 * greater than or equal to the interrupt count.
11227 *
11228 * The calculations below do not allow a steady state to be achieved.
11229	 * Only at the endpoints is it possible to have an unchanging
11230 * timeout.
11231 */
11232 if (npkts < rcv_intr_count) {
11233 /*
11234 * Not enough packets arrived before the timeout, adjust
11235 * timeout downward.
11236 */
11237 if (timeout < 2) /* already at minimum? */
11238 return;
11239 timeout >>= 1;
11240 } else {
11241 /*
11242 * More than enough packets arrived before the timeout, adjust
11243 * timeout upward.
11244 */
11245 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11246 return;
11247 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11248 }
11249
11250 rcd->rcvavail_timeout = timeout;
Jubin John4d114fd2016-02-14 20:21:43 -080011251 /*
11252 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11253 * been verified to be in range
11254 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011255 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011256 (u64)timeout <<
11257 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011258}
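/*
 * Illustrative example for adjust_rcv_timeout() above (all numbers are
 * hypothetical): with rcv_intr_count == 16 and a current
 * rcvavail_timeout of 64, an interrupt that saw npkts == 10 halves the
 * timeout to 32, while one that saw npkts == 20 doubles it to 128,
 * clamped to dd->rcv_intr_timeout_csr. As noted above, the value keeps
 * moving until it reaches one of the endpoints.
 */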
11259
11260void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11261 u32 intr_adjust, u32 npkts)
11262{
11263 struct hfi1_devdata *dd = rcd->dd;
11264 u64 reg;
11265 u32 ctxt = rcd->ctxt;
11266
11267 /*
11268 * Need to write timeout register before updating RcvHdrHead to ensure
11269 * that a new value is used when the HW decides to restart counting.
11270 */
11271 if (intr_adjust)
11272 adjust_rcv_timeout(rcd, npkts);
11273 if (updegr) {
11274 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11275 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11276 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11277 }
11278 mmiowb();
11279 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11280 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11281 << RCV_HDR_HEAD_HEAD_SHIFT);
11282 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11283 mmiowb();
11284}
11285
11286u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11287{
11288 u32 head, tail;
11289
11290 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11291 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11292
11293 if (rcd->rcvhdrtail_kvaddr)
11294 tail = get_rcvhdrtail(rcd);
11295 else
11296 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11297
11298 return head == tail;
11299}
11300
11301/*
11302 * Context Control and Receive Array encoding for buffer size:
11303 * 0x0 invalid
11304 * 0x1 4 KB
11305 * 0x2 8 KB
11306 * 0x3 16 KB
11307 * 0x4 32 KB
11308 * 0x5 64 KB
11309 * 0x6 128 KB
11310 * 0x7 256 KB
11311 * 0x8 512 KB (Receive Array only)
11312 * 0x9 1 MB (Receive Array only)
11313 * 0xa 2 MB (Receive Array only)
11314 *
11315 * 0xB-0xF - reserved (Receive Array only)
11316 *
11317 *
11318 * This routine assumes that the value has already been sanity checked.
11319 */
11320static u32 encoded_size(u32 size)
11321{
11322 switch (size) {
Jubin John8638b772016-02-14 20:19:24 -080011323 case 4 * 1024: return 0x1;
11324 case 8 * 1024: return 0x2;
11325 case 16 * 1024: return 0x3;
11326 case 32 * 1024: return 0x4;
11327 case 64 * 1024: return 0x5;
11328 case 128 * 1024: return 0x6;
11329 case 256 * 1024: return 0x7;
11330 case 512 * 1024: return 0x8;
11331 case 1 * 1024 * 1024: return 0x9;
11332 case 2 * 1024 * 1024: return 0xa;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011333 }
11334 return 0x1; /* if invalid, go with the minimum size */
11335}
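/*
 * Example of the encoding above: encoded_size(64 * 1024) returns 0x5,
 * while an unsupported size such as 3000 falls through to the default
 * of 0x1 (4 KB). hfi1_rcvctrl() below uses this value when programming
 * the RcvCtxtCtrl eager buffer size field (RCV_CTXT_CTRL_EGR_BUF_SIZE).
 */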
11336
11337void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11338{
11339 struct hfi1_ctxtdata *rcd;
11340 u64 rcvctrl, reg;
11341 int did_enable = 0;
11342
11343 rcd = dd->rcd[ctxt];
11344 if (!rcd)
11345 return;
11346
11347 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11348
11349 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11350 /* if the context already enabled, don't do the extra steps */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011351 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11352 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011353 /* reset the tail and hdr addresses, and sequence count */
11354 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11355 rcd->rcvhdrq_phys);
11356 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11357 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11358 rcd->rcvhdrqtailaddr_phys);
11359 rcd->seq_cnt = 1;
11360
11361 /* reset the cached receive header queue head value */
11362 rcd->head = 0;
11363
11364 /*
11365 * Zero the receive header queue so we don't get false
11366 * positives when checking the sequence number. The
11367 * sequence numbers could land exactly on the same spot.
11368 * E.g. a rcd restart before the receive header wrapped.
11369 */
11370 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11371
11372 /* starting timeout */
11373 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11374
11375 /* enable the context */
11376 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11377
11378 /* clean the egr buffer size first */
11379 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11380 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11381 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11382 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11383
11384 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11385 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11386 did_enable = 1;
11387
11388 /* zero RcvEgrIndexHead */
11389 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11390
11391 /* set eager count and base index */
11392 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11393 & RCV_EGR_CTRL_EGR_CNT_MASK)
11394 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11395 (((rcd->eager_base >> RCV_SHIFT)
11396 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11397 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11398 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11399
11400 /*
11401 * Set TID (expected) count and base index.
11402 * rcd->expected_count is set to individual RcvArray entries,
11403 * not pairs, and the CSR takes a pair-count in groups of
11404 * four, so divide by 8.
11405 */
11406 reg = (((rcd->expected_count >> RCV_SHIFT)
11407 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11408 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11409 (((rcd->expected_base >> RCV_SHIFT)
11410 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11411 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11412 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011413 if (ctxt == HFI1_CTRL_CTXT)
11414 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011415 }
11416 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11417 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011418 /*
11419 * When receive context is being disabled turn on tail
11420 * update with a dummy tail address and then disable
11421 * receive context.
11422 */
11423 if (dd->rcvhdrtail_dummy_physaddr) {
11424 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11425 dd->rcvhdrtail_dummy_physaddr);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011426 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011427 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11428 }
11429
Mike Marciniszyn77241052015-07-30 15:17:43 -040011430 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11431 }
11432 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11433 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11434 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11435 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11436 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11437 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011438 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11439 /* See comment on RcvCtxtCtrl.TailUpd above */
11440 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11441 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11442 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011443 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11444 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11445 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11446 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11447 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011448 /*
11449 * In one-packet-per-eager mode, the size comes from
11450 * the RcvArray entry.
11451 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011452 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11453 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11454 }
11455 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11456 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11457 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11458 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11459 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11460 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11461 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11462 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11463 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11464 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11465 rcd->rcvctrl = rcvctrl;
11466 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11467 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11468
11469 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011470 if (did_enable &&
11471 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011472 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11473 if (reg != 0) {
11474 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011475 ctxt, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011476 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11477 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11478 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11479 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11480 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11481 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011482 ctxt, reg, reg == 0 ? "not" : "still");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011483 }
11484 }
11485
11486 if (did_enable) {
11487 /*
11488 * The interrupt timeout and count must be set after
11489 * the context is enabled to take effect.
11490 */
11491 /* set interrupt timeout */
11492 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011493 (u64)rcd->rcvavail_timeout <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040011494 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11495
11496 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11497 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11498 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11499 }
11500
11501 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11502 /*
11503 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011504 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
11505 * so it doesn't contain an address that is invalid.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011506 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011507 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11508 dd->rcvhdrtail_dummy_physaddr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011509}
11510
Dean Luick582e05c2016-02-18 11:13:01 -080011511u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011512{
11513 int ret;
11514 u64 val = 0;
11515
11516 if (namep) {
11517 ret = dd->cntrnameslen;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011518 *namep = dd->cntrnames;
11519 } else {
11520 const struct cntr_entry *entry;
11521 int i, j;
11522
11523 ret = (dd->ndevcntrs) * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011524
11525 /* Get the start of the block of counters */
11526 *cntrp = dd->cntrs;
11527
11528 /*
11529 * Now go and fill in each counter in the block.
11530 */
11531 for (i = 0; i < DEV_CNTR_LAST; i++) {
11532 entry = &dev_cntrs[i];
11533 hfi1_cdbg(CNTR, "reading %s", entry->name);
11534 if (entry->flags & CNTR_DISABLED) {
11535 /* Nothing */
11536 hfi1_cdbg(CNTR, "\tDisabled\n");
11537 } else {
11538 if (entry->flags & CNTR_VL) {
11539 hfi1_cdbg(CNTR, "\tPer VL\n");
11540 for (j = 0; j < C_VL_COUNT; j++) {
11541 val = entry->rw_cntr(entry,
11542 dd, j,
11543 CNTR_MODE_R,
11544 0);
11545 hfi1_cdbg(
11546 CNTR,
11547 "\t\tRead 0x%llx for %d\n",
11548 val, j);
11549 dd->cntrs[entry->offset + j] =
11550 val;
11551 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011552 } else if (entry->flags & CNTR_SDMA) {
11553 hfi1_cdbg(CNTR,
11554 "\t Per SDMA Engine\n");
11555 for (j = 0; j < dd->chip_sdma_engines;
11556 j++) {
11557 val =
11558 entry->rw_cntr(entry, dd, j,
11559 CNTR_MODE_R, 0);
11560 hfi1_cdbg(CNTR,
11561 "\t\tRead 0x%llx for %d\n",
11562 val, j);
11563 dd->cntrs[entry->offset + j] =
11564 val;
11565 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011566 } else {
11567 val = entry->rw_cntr(entry, dd,
11568 CNTR_INVALID_VL,
11569 CNTR_MODE_R, 0);
11570 dd->cntrs[entry->offset] = val;
11571 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11572 }
11573 }
11574 }
11575 }
11576 return ret;
11577}
11578
11579/*
11580 * Used by sysfs to create files for hfi stats to read
11581 */
Dean Luick582e05c2016-02-18 11:13:01 -080011582u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011583{
11584 int ret;
11585 u64 val = 0;
11586
11587 if (namep) {
Dean Luick582e05c2016-02-18 11:13:01 -080011588 ret = ppd->dd->portcntrnameslen;
11589 *namep = ppd->dd->portcntrnames;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011590 } else {
11591 const struct cntr_entry *entry;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011592 int i, j;
11593
Dean Luick582e05c2016-02-18 11:13:01 -080011594 ret = ppd->dd->nportcntrs * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011595 *cntrp = ppd->cntrs;
11596
11597 for (i = 0; i < PORT_CNTR_LAST; i++) {
11598 entry = &port_cntrs[i];
11599 hfi1_cdbg(CNTR, "reading %s", entry->name);
11600 if (entry->flags & CNTR_DISABLED) {
11601 /* Nothing */
11602 hfi1_cdbg(CNTR, "\tDisabled\n");
11603 continue;
11604 }
11605
11606 if (entry->flags & CNTR_VL) {
11607 hfi1_cdbg(CNTR, "\tPer VL");
11608 for (j = 0; j < C_VL_COUNT; j++) {
11609 val = entry->rw_cntr(entry, ppd, j,
11610 CNTR_MODE_R,
11611 0);
11612 hfi1_cdbg(
11613 CNTR,
11614 "\t\tRead 0x%llx for %d",
11615 val, j);
11616 ppd->cntrs[entry->offset + j] = val;
11617 }
11618 } else {
11619 val = entry->rw_cntr(entry, ppd,
11620 CNTR_INVALID_VL,
11621 CNTR_MODE_R,
11622 0);
11623 ppd->cntrs[entry->offset] = val;
11624 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11625 }
11626 }
11627 }
11628 return ret;
11629}
11630
11631static void free_cntrs(struct hfi1_devdata *dd)
11632{
11633 struct hfi1_pportdata *ppd;
11634 int i;
11635
11636 if (dd->synth_stats_timer.data)
11637 del_timer_sync(&dd->synth_stats_timer);
11638 dd->synth_stats_timer.data = 0;
11639 ppd = (struct hfi1_pportdata *)(dd + 1);
11640 for (i = 0; i < dd->num_pports; i++, ppd++) {
11641 kfree(ppd->cntrs);
11642 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011643 free_percpu(ppd->ibport_data.rvp.rc_acks);
11644 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11645 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011646 ppd->cntrs = NULL;
11647 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011648 ppd->ibport_data.rvp.rc_acks = NULL;
11649 ppd->ibport_data.rvp.rc_qacks = NULL;
11650 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011651 }
11652 kfree(dd->portcntrnames);
11653 dd->portcntrnames = NULL;
11654 kfree(dd->cntrs);
11655 dd->cntrs = NULL;
11656 kfree(dd->scntrs);
11657 dd->scntrs = NULL;
11658 kfree(dd->cntrnames);
11659 dd->cntrnames = NULL;
11660}
11661
11662#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11663#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11664
11665static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11666 u64 *psval, void *context, int vl)
11667{
11668 u64 val;
11669 u64 sval = *psval;
11670
11671 if (entry->flags & CNTR_DISABLED) {
11672 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11673 return 0;
11674 }
11675
11676 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11677
11678 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11679
11680	/* If it's a synthetic counter, there is more work we need to do */
11681 if (entry->flags & CNTR_SYNTH) {
11682 if (sval == CNTR_MAX) {
11683 /* No need to read already saturated */
11684 return CNTR_MAX;
11685 }
11686
11687 if (entry->flags & CNTR_32BIT) {
11688 /* 32bit counters can wrap multiple times */
11689 u64 upper = sval >> 32;
11690 u64 lower = (sval << 32) >> 32;
11691
11692 if (lower > val) { /* hw wrapped */
11693 if (upper == CNTR_32BIT_MAX)
11694 val = CNTR_MAX;
11695 else
11696 upper++;
11697 }
11698
11699 if (val != CNTR_MAX)
11700 val = (upper << 32) | val;
11701
11702 } else {
11703 /* If we rolled we are saturated */
11704 if ((val < sval) || (val > CNTR_MAX))
11705 val = CNTR_MAX;
11706 }
11707 }
11708
11709 *psval = val;
11710
11711 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11712
11713 return val;
11714}
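/*
 * Worked example of the 32-bit synthetic counter handling in
 * read_dev_port_cntr() above (values are hypothetical): suppose the
 * saved value *psval is 0x1FFFFFFF0 (upper = 0x1, lower = 0xFFFFFFF0)
 * and the hardware now reads back 0x10. Since lower > val, the
 * hardware wrapped, so upper is bumped to 0x2 and the returned/saved
 * value becomes 0x200000010. Once the saved value reaches CNTR_MAX the
 * counter is treated as saturated and CNTR_MAX is returned from then
 * on.
 */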
11715
11716static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11717 struct cntr_entry *entry,
11718 u64 *psval, void *context, int vl, u64 data)
11719{
11720 u64 val;
11721
11722 if (entry->flags & CNTR_DISABLED) {
11723 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11724 return 0;
11725 }
11726
11727 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11728
11729 if (entry->flags & CNTR_SYNTH) {
11730 *psval = data;
11731 if (entry->flags & CNTR_32BIT) {
11732 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11733 (data << 32) >> 32);
11734 val = data; /* return the full 64bit value */
11735 } else {
11736 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11737 data);
11738 }
11739 } else {
11740 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11741 }
11742
11743 *psval = val;
11744
11745 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11746
11747 return val;
11748}
11749
11750u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11751{
11752 struct cntr_entry *entry;
11753 u64 *sval;
11754
11755 entry = &dev_cntrs[index];
11756 sval = dd->scntrs + entry->offset;
11757
11758 if (vl != CNTR_INVALID_VL)
11759 sval += vl;
11760
11761 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11762}
11763
11764u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11765{
11766 struct cntr_entry *entry;
11767 u64 *sval;
11768
11769 entry = &dev_cntrs[index];
11770 sval = dd->scntrs + entry->offset;
11771
11772 if (vl != CNTR_INVALID_VL)
11773 sval += vl;
11774
11775 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11776}
11777
11778u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11779{
11780 struct cntr_entry *entry;
11781 u64 *sval;
11782
11783 entry = &port_cntrs[index];
11784 sval = ppd->scntrs + entry->offset;
11785
11786 if (vl != CNTR_INVALID_VL)
11787 sval += vl;
11788
11789 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11790 (index <= C_RCV_HDR_OVF_LAST)) {
11791 /* We do not want to bother for disabled contexts */
11792 return 0;
11793 }
11794
11795 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11796}
11797
11798u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11799{
11800 struct cntr_entry *entry;
11801 u64 *sval;
11802
11803 entry = &port_cntrs[index];
11804 sval = ppd->scntrs + entry->offset;
11805
11806 if (vl != CNTR_INVALID_VL)
11807 sval += vl;
11808
11809 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11810 (index <= C_RCV_HDR_OVF_LAST)) {
11811 /* We do not want to bother for disabled contexts */
11812 return 0;
11813 }
11814
11815 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11816}
11817
11818static void update_synth_timer(unsigned long opaque)
11819{
11820 u64 cur_tx;
11821 u64 cur_rx;
11822 u64 total_flits;
11823 u8 update = 0;
11824 int i, j, vl;
11825 struct hfi1_pportdata *ppd;
11826 struct cntr_entry *entry;
11827
11828 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11829
11830 /*
11831	 * Rather than keep beating on the CSRs, pick a minimal set that we can
11832	 * check to watch for potential rollover. We can do this by looking at
11833	 * the number of flits sent/received. If the total flits exceeds 32 bits,
11834	 * then we have to iterate over all the counters and update.
11835 */
11836 entry = &dev_cntrs[C_DC_RCV_FLITS];
11837 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11838
11839 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11840 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11841
11842 hfi1_cdbg(
11843 CNTR,
11844 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11845 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11846
11847 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11848 /*
11849 * May not be strictly necessary to update but it won't hurt and
11850 * simplifies the logic here.
11851 */
11852 update = 1;
11853 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11854 dd->unit);
11855 } else {
11856 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11857 hfi1_cdbg(CNTR,
11858 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11859 total_flits, (u64)CNTR_32BIT_MAX);
11860 if (total_flits >= CNTR_32BIT_MAX) {
11861 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11862 dd->unit);
11863 update = 1;
11864 }
11865 }
11866
11867 if (update) {
11868 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11869 for (i = 0; i < DEV_CNTR_LAST; i++) {
11870 entry = &dev_cntrs[i];
11871 if (entry->flags & CNTR_VL) {
11872 for (vl = 0; vl < C_VL_COUNT; vl++)
11873 read_dev_cntr(dd, i, vl);
11874 } else {
11875 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11876 }
11877 }
11878 ppd = (struct hfi1_pportdata *)(dd + 1);
11879 for (i = 0; i < dd->num_pports; i++, ppd++) {
11880 for (j = 0; j < PORT_CNTR_LAST; j++) {
11881 entry = &port_cntrs[j];
11882 if (entry->flags & CNTR_VL) {
11883 for (vl = 0; vl < C_VL_COUNT; vl++)
11884 read_port_cntr(ppd, j, vl);
11885 } else {
11886 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11887 }
11888 }
11889 }
11890
11891 /*
11892 * We want the value in the register. The goal is to keep track
11893		 * of the number of "ticks", not the counter value. In other
11894		 * words, if the register rolls, we want to notice it and go ahead
11895 * and force an update.
11896 */
11897 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11898 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11899 CNTR_MODE_R, 0);
11900
11901 entry = &dev_cntrs[C_DC_RCV_FLITS];
11902 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11903 CNTR_MODE_R, 0);
11904
11905 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11906 dd->unit, dd->last_tx, dd->last_rx);
11907
11908 } else {
11909 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11910 }
11911
Bart Van Assche48a0cc132016-06-03 12:09:56 -070011912 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011913}
11914
11915#define C_MAX_NAME 13 /* 12 chars + one for the terminating '\0' */
11916static int init_cntrs(struct hfi1_devdata *dd)
11917{
Dean Luickc024c552016-01-11 18:30:57 -050011918 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011919 size_t sz;
11920 char *p;
11921 char name[C_MAX_NAME];
11922 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011923 const char *bit_type_32 = ",32";
11924 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011925
11926 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011927 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11928 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011929
11930 /***********************/
11931 /* per device counters */
11932 /***********************/
11933
11934 /* size names and determine how many we have*/
11935 dd->ndevcntrs = 0;
11936 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011937
11938 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011939 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11940 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11941 continue;
11942 }
11943
11944 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050011945 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011946 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011947 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080011948 dev_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011949 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011950 /* Add ",32" for 32-bit counters */
11951 if (dev_cntrs[i].flags & CNTR_32BIT)
11952 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011953 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011954 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011955 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011956 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050011957 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011958 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011959 snprintf(name, C_MAX_NAME, "%s%d",
11960 dev_cntrs[i].name, j);
11961 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011962 /* Add ",32" for 32-bit counters */
11963 if (dev_cntrs[i].flags & CNTR_32BIT)
11964 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011965 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011966 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011967 }
11968 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011969 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011970 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011971 /* Add ",32" for 32-bit counters */
11972 if (dev_cntrs[i].flags & CNTR_32BIT)
11973 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050011974 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011975 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011976 }
11977 }
11978
11979 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050011980 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011981 if (!dd->cntrs)
11982 goto bail;
11983
Dean Luickc024c552016-01-11 18:30:57 -050011984 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011985 if (!dd->scntrs)
11986 goto bail;
11987
Mike Marciniszyn77241052015-07-30 15:17:43 -040011988 /* allocate space for the counter names */
11989 dd->cntrnameslen = sz;
11990 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11991 if (!dd->cntrnames)
11992 goto bail;
11993
11994 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050011995 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011996 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11997 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011998 } else if (dev_cntrs[i].flags & CNTR_VL) {
11999 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012000 snprintf(name, C_MAX_NAME, "%s%d",
12001 dev_cntrs[i].name,
12002 vl_from_idx(j));
12003 memcpy(p, name, strlen(name));
12004 p += strlen(name);
12005
12006 /* Counter is 32 bits */
12007 if (dev_cntrs[i].flags & CNTR_32BIT) {
12008 memcpy(p, bit_type_32, bit_type_32_sz);
12009 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012010 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012011
Mike Marciniszyn77241052015-07-30 15:17:43 -040012012 *p++ = '\n';
12013 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012014 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12015 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012016 snprintf(name, C_MAX_NAME, "%s%d",
12017 dev_cntrs[i].name, j);
12018 memcpy(p, name, strlen(name));
12019 p += strlen(name);
12020
12021 /* Counter is 32 bits */
12022 if (dev_cntrs[i].flags & CNTR_32BIT) {
12023 memcpy(p, bit_type_32, bit_type_32_sz);
12024 p += bit_type_32_sz;
12025 }
12026
12027 *p++ = '\n';
12028 }
12029 } else {
12030 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12031 p += strlen(dev_cntrs[i].name);
12032
12033 /* Counter is 32 bits */
12034 if (dev_cntrs[i].flags & CNTR_32BIT) {
12035 memcpy(p, bit_type_32, bit_type_32_sz);
12036 p += bit_type_32_sz;
12037 }
12038
12039 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040012040 }
12041 }
12042
12043 /*********************/
12044 /* per port counters */
12045 /*********************/
12046
12047 /*
12048 * Go through the counters for the overflows and disable the ones we
12049 * don't need. This varies based on platform so we need to do it
12050 * dynamically here.
12051 */
12052 rcv_ctxts = dd->num_rcv_contexts;
12053 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12054 i <= C_RCV_HDR_OVF_LAST; i++) {
12055 port_cntrs[i].flags |= CNTR_DISABLED;
12056 }
12057
12058 /* size port counter names and determine how many we have*/
12059 sz = 0;
12060 dd->nportcntrs = 0;
12061 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012062 if (port_cntrs[i].flags & CNTR_DISABLED) {
12063 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12064 continue;
12065 }
12066
12067 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012068 port_cntrs[i].offset = dd->nportcntrs;
12069 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012070 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012071 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012072 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012073 /* Add ",32" for 32-bit counters */
12074 if (port_cntrs[i].flags & CNTR_32BIT)
12075 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012076 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012077 dd->nportcntrs++;
12078 }
12079 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012080 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012081 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012082 /* Add ",32" for 32-bit counters */
12083 if (port_cntrs[i].flags & CNTR_32BIT)
12084 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012085 port_cntrs[i].offset = dd->nportcntrs;
12086 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012087 }
12088 }
12089
12090 /* allocate space for the counter names */
12091 dd->portcntrnameslen = sz;
12092 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12093 if (!dd->portcntrnames)
12094 goto bail;
12095
12096 /* fill in port cntr names */
12097 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12098 if (port_cntrs[i].flags & CNTR_DISABLED)
12099 continue;
12100
12101 if (port_cntrs[i].flags & CNTR_VL) {
12102 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012103 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012104 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012105 memcpy(p, name, strlen(name));
12106 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012107
12108 /* Counter is 32 bits */
12109 if (port_cntrs[i].flags & CNTR_32BIT) {
12110 memcpy(p, bit_type_32, bit_type_32_sz);
12111 p += bit_type_32_sz;
12112 }
12113
Mike Marciniszyn77241052015-07-30 15:17:43 -040012114 *p++ = '\n';
12115 }
12116 } else {
12117 memcpy(p, port_cntrs[i].name,
12118 strlen(port_cntrs[i].name));
12119 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012120
12121 /* Counter is 32 bits */
12122 if (port_cntrs[i].flags & CNTR_32BIT) {
12123 memcpy(p, bit_type_32, bit_type_32_sz);
12124 p += bit_type_32_sz;
12125 }
12126
Mike Marciniszyn77241052015-07-30 15:17:43 -040012127 *p++ = '\n';
12128 }
12129 }
12130
12131 /* allocate per port storage for counter values */
12132 ppd = (struct hfi1_pportdata *)(dd + 1);
12133 for (i = 0; i < dd->num_pports; i++, ppd++) {
12134 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12135 if (!ppd->cntrs)
12136 goto bail;
12137
12138 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12139 if (!ppd->scntrs)
12140 goto bail;
12141 }
12142
12143 /* CPU counters need to be allocated and zeroed */
12144 if (init_cpu_counters(dd))
12145 goto bail;
12146
12147 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12148 return 0;
12149bail:
12150 free_cntrs(dd);
12151 return -ENOMEM;
12152}
12153
Mike Marciniszyn77241052015-07-30 15:17:43 -040012154static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12155{
12156 switch (chip_lstate) {
12157 default:
12158 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012159 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12160 chip_lstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012161 /* fall through */
12162 case LSTATE_DOWN:
12163 return IB_PORT_DOWN;
12164 case LSTATE_INIT:
12165 return IB_PORT_INIT;
12166 case LSTATE_ARMED:
12167 return IB_PORT_ARMED;
12168 case LSTATE_ACTIVE:
12169 return IB_PORT_ACTIVE;
12170 }
12171}
12172
12173u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12174{
12175 /* look at the HFI meta-states only */
12176 switch (chip_pstate & 0xf0) {
12177 default:
12178 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012179 chip_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012180 /* fall through */
12181 case PLS_DISABLED:
12182 return IB_PORTPHYSSTATE_DISABLED;
12183 case PLS_OFFLINE:
12184 return OPA_PORTPHYSSTATE_OFFLINE;
12185 case PLS_POLLING:
12186 return IB_PORTPHYSSTATE_POLLING;
12187 case PLS_CONFIGPHY:
12188 return IB_PORTPHYSSTATE_TRAINING;
12189 case PLS_LINKUP:
12190 return IB_PORTPHYSSTATE_LINKUP;
12191 case PLS_PHYTEST:
12192 return IB_PORTPHYSSTATE_PHY_TEST;
12193 }
12194}
12195
12196/* return the OPA port logical state name */
12197const char *opa_lstate_name(u32 lstate)
12198{
12199 static const char * const port_logical_names[] = {
12200 "PORT_NOP",
12201 "PORT_DOWN",
12202 "PORT_INIT",
12203 "PORT_ARMED",
12204 "PORT_ACTIVE",
12205 "PORT_ACTIVE_DEFER",
12206 };
12207 if (lstate < ARRAY_SIZE(port_logical_names))
12208 return port_logical_names[lstate];
12209 return "unknown";
12210}
12211
12212/* return the OPA port physical state name */
12213const char *opa_pstate_name(u32 pstate)
12214{
12215 static const char * const port_physical_names[] = {
12216 "PHYS_NOP",
12217 "reserved1",
12218 "PHYS_POLL",
12219 "PHYS_DISABLED",
12220 "PHYS_TRAINING",
12221 "PHYS_LINKUP",
12222 "PHYS_LINK_ERR_RECOVER",
12223 "PHYS_PHY_TEST",
12224 "reserved8",
12225 "PHYS_OFFLINE",
12226 "PHYS_GANGED",
12227 "PHYS_TEST",
12228 };
12229 if (pstate < ARRAY_SIZE(port_physical_names))
12230 return port_physical_names[pstate];
12231 return "unknown";
12232}
12233
12234/*
12235 * Read the hardware link state and set the driver's cached value of it.
12236 * Return the (new) current value.
12237 */
12238u32 get_logical_state(struct hfi1_pportdata *ppd)
12239{
12240 u32 new_state;
12241
12242 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12243 if (new_state != ppd->lstate) {
12244 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012245 opa_lstate_name(new_state), new_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012246 ppd->lstate = new_state;
12247 }
12248 /*
12249 * Set port status flags in the page mapped into userspace
12250 * memory. Do it here to ensure a reliable state - this is
12251 * the only function called by all state handling code.
12252	 * Always set the flags because the cache value
12253 * might have been changed explicitly outside of this
12254 * function.
12255 */
12256 if (ppd->statusp) {
12257 switch (ppd->lstate) {
12258 case IB_PORT_DOWN:
12259 case IB_PORT_INIT:
12260 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12261 HFI1_STATUS_IB_READY);
12262 break;
12263 case IB_PORT_ARMED:
12264 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12265 break;
12266 case IB_PORT_ACTIVE:
12267 *ppd->statusp |= HFI1_STATUS_IB_READY;
12268 break;
12269 }
12270 }
12271 return ppd->lstate;
12272}
12273
12274/**
12275 * wait_logical_linkstate - wait for an IB link state change to occur
12276 * @ppd: port device
12277 * @state: the state to wait for
12278 * @msecs: the number of milliseconds to wait
12279 *
12280 * Wait up to msecs milliseconds for IB link state change to occur.
12281 * For now, take the easy polling route.
12282 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12283 */
12284static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12285 int msecs)
12286{
12287 unsigned long timeout;
12288
12289 timeout = jiffies + msecs_to_jiffies(msecs);
12290 while (1) {
12291 if (get_logical_state(ppd) == state)
12292 return 0;
12293 if (time_after(jiffies, timeout))
12294 break;
12295 msleep(20);
12296 }
12297 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12298
12299 return -ETIMEDOUT;
12300}
12301
12302u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12303{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012304 u32 pstate;
12305 u32 ib_pstate;
12306
12307 pstate = read_physical_state(ppd->dd);
12308 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012309 if (ppd->last_pstate != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012310 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012311 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12312 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12313 pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012314 ppd->last_pstate = ib_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012315 }
12316 return ib_pstate;
12317}
12318
12319/*
12320 * Read/modify/write ASIC_QSFP register bits as selected by mask
12321 * data: 0 or 1 in the positions depending on what needs to be written
12322 * dir: 0 for read, 1 for write
12323 * mask: select by setting
12324 * I2CCLK (bit 0)
12325 * I2CDATA (bit 1)
12326 */
12327u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12328 u32 mask)
12329{
12330 u64 qsfp_oe, target_oe;
12331
12332 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12333 if (mask) {
12334 /* We are writing register bits, so lock access */
12335 dir &= mask;
12336 data &= mask;
12337
12338 qsfp_oe = read_csr(dd, target_oe);
12339 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12340 write_csr(dd, target_oe, qsfp_oe);
12341 }
12342 /* We are exclusively reading bits here, but it is unlikely
12343 * we'll get valid data when we set the direction of the pin
12344	 * in the same call, so the caller should invoke this function
12345	 * again to get valid data.
12346 */
12347 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12348}
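/*
 * Illustrative usage, not taken from the original source: a caller that
 * only wants to sample the current pin values can pass mask == 0 so the
 * OE register is left untouched and only the IN register is read:
 *
 *	u64 pins = hfi1_gpio_mod(dd, target, 0, 0, 0);
 *	int sda = (pins >> 1) & 1;	- I2CDATA is bit 1 per the layout above
 *
 * The "sda" name is only an example; the function guarantees no more
 * than returning the raw ASIC_QSFPn_IN CSR contents.
 */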
12349
12350#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12351(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12352
12353#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12354(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12355
12356int hfi1_init_ctxt(struct send_context *sc)
12357{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012358 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012359 struct hfi1_devdata *dd = sc->dd;
12360 u64 reg;
12361 u8 set = (sc->type == SC_USER ?
12362 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12363 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12364 reg = read_kctxt_csr(dd, sc->hw_context,
12365 SEND_CTXT_CHECK_ENABLE);
12366 if (set)
12367 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12368 else
12369 SET_STATIC_RATE_CONTROL_SMASK(reg);
12370 write_kctxt_csr(dd, sc->hw_context,
12371 SEND_CTXT_CHECK_ENABLE, reg);
12372 }
12373 return 0;
12374}
12375
12376int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12377{
12378 int ret = 0;
12379 u64 reg;
12380
12381 if (dd->icode != ICODE_RTL_SILICON) {
12382 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12383 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12384 __func__);
12385 return -EINVAL;
12386 }
12387 reg = read_csr(dd, ASIC_STS_THERM);
12388 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12389 ASIC_STS_THERM_CURR_TEMP_MASK);
12390 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12391 ASIC_STS_THERM_LO_TEMP_MASK);
12392 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12393 ASIC_STS_THERM_HI_TEMP_MASK);
12394 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12395 ASIC_STS_THERM_CRIT_TEMP_MASK);
12396 /* triggers is a 3-bit value - 1 bit per trigger. */
12397 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12398
12399 return ret;
12400}
12401
12402/* ========================================================================= */
12403
12404/*
12405 * Enable/disable chip from delivering interrupts.
12406 */
12407void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12408{
12409 int i;
12410
12411 /*
12412 * In HFI, the mask needs to be 1 to allow interrupts.
12413 */
12414 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012415 /* enable all interrupts */
12416 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012417 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012418
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012419 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012420 } else {
12421 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012422 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012423 }
12424}
12425
12426/*
12427 * Clear all interrupt sources on the chip.
12428 */
12429static void clear_all_interrupts(struct hfi1_devdata *dd)
12430{
12431 int i;
12432
12433 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012434 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012435
12436 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12437 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12438 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12439 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12440 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12441 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12442 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12443 for (i = 0; i < dd->chip_send_contexts; i++)
12444 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12445 for (i = 0; i < dd->chip_sdma_engines; i++)
12446 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12447
12448 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12449 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12450 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12451}
12452
12453/* Move to pcie.c? */
12454static void disable_intx(struct pci_dev *pdev)
12455{
12456 pci_intx(pdev, 0);
12457}
12458
12459static void clean_up_interrupts(struct hfi1_devdata *dd)
12460{
12461 int i;
12462
12463 /* remove irqs - must happen before disabling/turning off */
12464 if (dd->num_msix_entries) {
12465 /* MSI-X */
12466 struct hfi1_msix_entry *me = dd->msix_entries;
12467
12468 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012469 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012470 continue;
12471 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012472 free_irq(me->msix.vector, me->arg);
12473 }
12474 } else {
12475 /* INTx */
12476 if (dd->requested_intx_irq) {
12477 free_irq(dd->pcidev->irq, dd);
12478 dd->requested_intx_irq = 0;
12479 }
12480 }
12481
12482 /* turn off interrupts */
12483 if (dd->num_msix_entries) {
12484 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012485 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012486 } else {
12487 /* INTx */
12488 disable_intx(dd->pcidev);
12489 }
12490
12491 /* clean structures */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012492 kfree(dd->msix_entries);
12493 dd->msix_entries = NULL;
12494 dd->num_msix_entries = 0;
12495}
12496
12497/*
12498 * Remap the interrupt source from the general handler to the given MSI-X
12499 * interrupt.
12500 */
12501static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12502{
12503 u64 reg;
12504 int m, n;
12505
12506 /* clear from the handled mask of the general interrupt */
12507 m = isrc / 64;
12508 n = isrc % 64;
12509 dd->gi_mask[m] &= ~((u64)1 << n);
12510
12511 /* direct the chip source to the given MSI-X interrupt */
12512 m = isrc / 8;
12513 n = isrc % 8;
Jubin John8638b772016-02-14 20:19:24 -080012514 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12515 reg &= ~((u64)0xff << (8 * n));
12516 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12517 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012518}
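/*
 * Worked example (illustrative only): for chip interrupt source 70,
 * m = 70 / 64 = 1 and n = 70 % 64 = 6, so bit 6 of gi_mask[1] is
 * cleared; then m = 70 / 8 = 8 and n = 70 % 8 = 6, so byte 6 of
 * CCE_INT_MAP register 8 is rewritten with the target MSI-X vector.
 */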
12519
12520static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12521 int engine, int msix_intr)
12522{
12523 /*
12524 * SDMA engine interrupt sources grouped by type, rather than
12525 * engine. Per-engine interrupts are as follows:
12526 * SDMA
12527 * SDMAProgress
12528 * SDMAIdle
12529 */
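	/*
	 * Illustrative example with assumed sizes: if TXE_NUM_SDMA_ENGINES
	 * were 16, then for engine 3 the three calls below remap chip
	 * sources IS_SDMA_START + 3, + 19 and + 35 - one per interrupt
	 * type - to the same MSI-X vector.
	 */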
Jubin John8638b772016-02-14 20:19:24 -080012530 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012531 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012532 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012533 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012534 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012535 msix_intr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012536}
12537
Mike Marciniszyn77241052015-07-30 15:17:43 -040012538static int request_intx_irq(struct hfi1_devdata *dd)
12539{
12540 int ret;
12541
Jubin John98050712015-11-16 21:59:27 -050012542 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12543 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012544 ret = request_irq(dd->pcidev->irq, general_interrupt,
Jubin John17fb4f22016-02-14 20:21:52 -080012545 IRQF_SHARED, dd->intx_name, dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012546 if (ret)
12547 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012548 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012549 else
12550 dd->requested_intx_irq = 1;
12551 return ret;
12552}
12553
12554static int request_msix_irqs(struct hfi1_devdata *dd)
12555{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012556 int first_general, last_general;
12557 int first_sdma, last_sdma;
12558 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012559 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012560
12561 /* calculate the ranges we are going to use */
12562 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080012563 last_general = first_general + 1;
12564 first_sdma = last_general;
12565 last_sdma = first_sdma + dd->num_sdma;
12566 first_rx = last_sdma;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012567 last_rx = first_rx + dd->n_krcv_queues;
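	/*
	 * Example layout with hypothetical counts: with num_sdma == 16 and
	 * n_krcv_queues == 8, vector 0 is the general interrupt, vectors
	 * 1-16 serve the SDMA engines and vectors 17-24 serve the kernel
	 * receive contexts.
	 */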
12568
12569 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012570 * Sanity check - the code expects all SDMA chip source
12571 * interrupts to be in the same CSR, starting at bit 0. Verify
12572 * that this is true by checking the bit location of the start.
12573 */
12574 BUILD_BUG_ON(IS_SDMA_START % 64);
12575
12576 for (i = 0; i < dd->num_msix_entries; i++) {
12577 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12578 const char *err_info;
12579 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012580 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012581 void *arg;
12582 int idx;
12583 struct hfi1_ctxtdata *rcd = NULL;
12584 struct sdma_engine *sde = NULL;
12585
12586 /* obtain the arguments to request_irq */
12587 if (first_general <= i && i < last_general) {
12588 idx = i - first_general;
12589 handler = general_interrupt;
12590 arg = dd;
12591 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012592 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012593 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012594 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012595 } else if (first_sdma <= i && i < last_sdma) {
12596 idx = i - first_sdma;
12597 sde = &dd->per_sdma[idx];
12598 handler = sdma_interrupt;
12599 arg = sde;
12600 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012601 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012602 err_info = "sdma";
12603 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012604 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012605 } else if (first_rx <= i && i < last_rx) {
12606 idx = i - first_rx;
12607 rcd = dd->rcd[idx];
12608 /* no interrupt if no rcd */
12609 if (!rcd)
12610 continue;
12611 /*
12612 * Set the interrupt register and mask for this
12613 * context's interrupt.
12614 */
Jubin John8638b772016-02-14 20:19:24 -080012615 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012616 rcd->imask = ((u64)1) <<
Jubin John8638b772016-02-14 20:19:24 -080012617 ((IS_RCVAVAIL_START + idx) % 64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012618 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012619 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012620 arg = rcd;
12621 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012622 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012623 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012624 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012625 me->type = IRQ_RCVCTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012626 } else {
12627 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080012628 * ignore it
12629 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012630 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012631 "Unexpected extra MSI-X interrupt %d\n", i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012632 continue;
12633 }
12634 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080012635 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012636 continue;
12637 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080012638 me->name[sizeof(me->name) - 1] = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012639
Dean Luickf4f30031c2015-10-26 10:28:44 -040012640 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
Jubin John17fb4f22016-02-14 20:21:52 -080012641 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012642 if (ret) {
12643 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012644 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12645 err_info, me->msix.vector, idx, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012646 return ret;
12647 }
12648 /*
12649 * assign arg after request_irq call, so it will be
12650 * cleaned up
12651 */
12652 me->arg = arg;
12653
Mitko Haralanov957558c2016-02-03 14:33:40 -080012654 ret = hfi1_get_irq_affinity(dd, me);
12655 if (ret)
12656 dd_dev_err(dd,
12657 "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012658 }
12659
Mike Marciniszyn77241052015-07-30 15:17:43 -040012660 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012661}
12662
12663/*
12664 * Set the general handler to accept all interrupts, remap all
12665 * chip interrupts back to MSI-X 0.
12666 */
12667static void reset_interrupts(struct hfi1_devdata *dd)
12668{
12669 int i;
12670
12671 /* all interrupts handled by the general handler */
12672 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12673 dd->gi_mask[i] = ~(u64)0;
12674
12675 /* all chip interrupts map to MSI-X 0 */
12676 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012677 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012678}
12679
12680static int set_up_interrupts(struct hfi1_devdata *dd)
12681{
12682 struct hfi1_msix_entry *entries;
12683 u32 total, request;
12684 int i, ret;
12685 int single_interrupt = 0; /* we expect to have all the interrupts */
12686
12687 /*
12688 * Interrupt count:
12689 * 1 general, "slow path" interrupt (includes the SDMA engines
12690 * slow source, SDMACleanupDone)
12691 * N interrupts - one per used SDMA engine
12692 * M interrupt - one per kernel receive context
12693 */
12694 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12695
12696 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12697 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012698 ret = -ENOMEM;
12699 goto fail;
12700 }
12701 /* 1-1 MSI-X entry assignment */
12702 for (i = 0; i < total; i++)
12703 entries[i].msix.entry = i;
12704
12705 /* ask for MSI-X interrupts */
12706 request = total;
12707 request_msix(dd, &request, entries);
12708
12709 if (request == 0) {
12710 /* using INTx */
12711 /* dd->num_msix_entries already zero */
12712 kfree(entries);
12713 single_interrupt = 1;
12714 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12715 } else {
12716 /* using MSI-X */
12717 dd->num_msix_entries = request;
12718 dd->msix_entries = entries;
12719
12720 if (request != total) {
12721 /* using MSI-X, with reduced interrupts */
12722 dd_dev_err(
12723 dd,
12724 "cannot handle reduced interrupt case, want %u, got %u\n",
12725 total, request);
12726 ret = -EINVAL;
12727 goto fail;
12728 }
12729 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12730 }
12731
12732 /* mask all interrupts */
12733 set_intr_state(dd, 0);
12734 /* clear all pending interrupts */
12735 clear_all_interrupts(dd);
12736
12737 /* reset general handler mask, chip MSI-X mappings */
12738 reset_interrupts(dd);
12739
12740 if (single_interrupt)
12741 ret = request_intx_irq(dd);
12742 else
12743 ret = request_msix_irqs(dd);
12744 if (ret)
12745 goto fail;
12746
12747 return 0;
12748
12749fail:
12750 clean_up_interrupts(dd);
12751 return ret;
12752}
12753
12754/*
12755 * Set up context values in dd. Sets:
12756 *
12757 * num_rcv_contexts - number of contexts being used
12758 * n_krcv_queues - number of kernel contexts
12759 * first_user_ctxt - first non-kernel context in array of contexts
12760 * freectxts - number of free user contexts
12761 * num_send_contexts - number of PIO send contexts being used
12762 */
12763static int set_up_context_variables(struct hfi1_devdata *dd)
12764{
12765 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012766 int total_contexts;
12767 int ret;
12768 unsigned ngroups;
Dean Luick8f000f72016-04-12 11:32:06 -070012769 int qos_rmt_count;
12770 int user_rmt_reduced;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012771
12772 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070012773 * Kernel receive contexts:
12774	 * - min of 2 or 1 context per NUMA node (excluding control context)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012775 * - Context 0 - control context (VL15/multicast/error)
Dean Luick33a9eb52016-04-12 10:50:22 -070012776 * - Context 1 - first kernel context
12777 * - Context 2 - second kernel context
12778 * ...
Mike Marciniszyn77241052015-07-30 15:17:43 -040012779 */
12780 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012781 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070012782 * n_krcvqs is the sum of module parameter kernel receive
12783 * contexts, krcvqs[]. It does not include the control
12784 * context, so add that.
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012785 */
Dean Luick33a9eb52016-04-12 10:50:22 -070012786 num_kernel_contexts = n_krcvqs + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012787 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012788 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012789 num_kernel_contexts =
12790 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12791 /*
12792 * Every kernel receive context needs an ACK send context.
12793	 * One send context is allocated for each VL{0-7} and VL15.
12794 */
12795 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12796 dd_dev_err(dd,
12797 "Reducing # kernel rcv contexts to: %d, from %d\n",
12798 (int)(dd->chip_send_contexts - num_vls - 1),
12799 (int)num_kernel_contexts);
12800 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12801 }
12802 /*
Jubin John0852d242016-04-12 11:30:08 -070012803 * User contexts:
12804 * - default to 1 user context per real (non-HT) CPU core if
12805 * num_user_contexts is negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012806 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012807 if (num_user_contexts < 0)
Jubin John0852d242016-04-12 11:30:08 -070012808 num_user_contexts =
12809 cpumask_weight(&dd->affinity->real_cpu_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012810
12811 total_contexts = num_kernel_contexts + num_user_contexts;
12812
12813 /*
12814 * Adjust the counts given a global max.
12815 */
12816 if (total_contexts > dd->chip_rcv_contexts) {
12817 dd_dev_err(dd,
12818 "Reducing # user receive contexts to: %d, from %d\n",
12819 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12820 (int)num_user_contexts);
12821 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12822 /* recalculate */
12823 total_contexts = num_kernel_contexts + num_user_contexts;
12824 }
12825
Dean Luick8f000f72016-04-12 11:32:06 -070012826 /* each user context requires an entry in the RMT */
12827 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12828 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12829 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12830 dd_dev_err(dd,
12831 "RMT size is reducing the number of user receive contexts from %d to %d\n",
12832 (int)num_user_contexts,
12833 user_rmt_reduced);
12834 /* recalculate */
12835 num_user_contexts = user_rmt_reduced;
12836 total_contexts = num_kernel_contexts + num_user_contexts;
12837 }
12838
Mike Marciniszyn77241052015-07-30 15:17:43 -040012839 /* the first N are kernel contexts, the rest are user contexts */
12840 dd->num_rcv_contexts = total_contexts;
12841 dd->n_krcv_queues = num_kernel_contexts;
12842 dd->first_user_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080012843 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012844 dd->freectxts = num_user_contexts;
12845 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012846 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12847 (int)dd->chip_rcv_contexts,
12848 (int)dd->num_rcv_contexts,
12849 (int)dd->n_krcv_queues,
12850 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012851
12852 /*
12853 * Receive array allocation:
12854 * All RcvArray entries are divided into groups of 8. This
12855 * is required by the hardware and will speed up writes to
12856 * consecutive entries by using write-combining of the entire
12857 * cacheline.
12858 *
12859 * The number of groups are evenly divided among all contexts.
12860 * any left over groups will be given to the first N user
12861 * contexts.
12862 */
12863 dd->rcv_entries.group_size = RCV_INCREMENT;
12864 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12865 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12866 dd->rcv_entries.nctxt_extra = ngroups -
12867 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12868 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12869 dd->rcv_entries.ngroups,
12870 dd->rcv_entries.nctxt_extra);
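	/*
	 * Sizing example with hypothetical values: a 2048-entry RcvArray
	 * with a group size of 8 yields 256 groups; with 16 receive
	 * contexts each context gets 16 groups and nctxt_extra is 0. Any
	 * remainder groups go to the first user contexts.
	 */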
12871 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12872 MAX_EAGER_ENTRIES * 2) {
12873 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12874 dd->rcv_entries.group_size;
12875 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012876 "RcvArray group count too high, change to %u\n",
12877 dd->rcv_entries.ngroups);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012878 dd->rcv_entries.nctxt_extra = 0;
12879 }
12880 /*
12881 * PIO send contexts
12882 */
12883 ret = init_sc_pools_and_sizes(dd);
12884 if (ret >= 0) { /* success */
12885 dd->num_send_contexts = ret;
12886 dd_dev_info(
12887 dd,
Jianxin Xiong44306f12016-04-12 11:30:28 -070012888 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040012889 dd->chip_send_contexts,
12890 dd->num_send_contexts,
12891 dd->sc_sizes[SC_KERNEL].count,
12892 dd->sc_sizes[SC_ACK].count,
Jianxin Xiong44306f12016-04-12 11:30:28 -070012893 dd->sc_sizes[SC_USER].count,
12894 dd->sc_sizes[SC_VL15].count);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012895 ret = 0; /* success */
12896 }
12897
12898 return ret;
12899}
12900
12901/*
12902 * Set the device/port partition key table. The MAD code
12903 * will ensure that, at least, the partial management
12904 * partition key is present in the table.
12905 */
12906static void set_partition_keys(struct hfi1_pportdata *ppd)
12907{
12908 struct hfi1_devdata *dd = ppd->dd;
12909 u64 reg = 0;
12910 int i;
12911
12912 dd_dev_info(dd, "Setting partition keys\n");
12913 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12914 reg |= (ppd->pkeys[i] &
12915 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12916 ((i % 4) *
12917 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12918 /* Each register holds 4 PKey values. */
12919 if ((i % 4) == 3) {
12920 write_csr(dd, RCV_PARTITION_KEY +
12921 ((i - 3) * 2), reg);
12922 reg = 0;
12923 }
12924 }
12925
12926 /* Always enable HW pkeys check when pkeys table is set */
12927 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12928}
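/*
 * Packing example (illustrative): pkeys[0..3] land in the CSR at
 * RCV_PARTITION_KEY + 0 and pkeys[4..7] in the CSR at
 * RCV_PARTITION_KEY + 8, since the write above uses (i - 3) * 2 and
 * each 64-bit register holds four key fields.
 */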
12929
12930/*
12931 * These CSRs and memories are uninitialized on reset and must be
12932 * written before reading to set the ECC/parity bits.
12933 *
12934 * NOTE: All user context CSRs that are not mmaped write-only
12935 * (e.g. the TID flows) must be initialized even if the driver never
12936 * reads them.
12937 */
12938static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12939{
12940 int i, j;
12941
12942 /* CceIntMap */
12943 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012944 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012945
12946 /* SendCtxtCreditReturnAddr */
12947 for (i = 0; i < dd->chip_send_contexts; i++)
12948 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12949
12950 /* PIO Send buffers */
12951 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080012952 /*
12953 * These are not normally read, and (presently) have no method
12954 * to be read, so are not pre-initialized
12955 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012956
12957 /* RcvHdrAddr */
12958 /* RcvHdrTailAddr */
12959 /* RcvTidFlowTable */
12960 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12961 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12962 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12963 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080012964 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012965 }
12966
12967 /* RcvArray */
12968 for (i = 0; i < dd->chip_rcv_array_count; i++)
Jubin John8638b772016-02-14 20:19:24 -080012969 write_csr(dd, RCV_ARRAY + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080012970 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012971
12972 /* RcvQPMapTable */
12973 for (i = 0; i < 32; i++)
12974 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12975}
12976
12977/*
12978 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12979 */
12980static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12981 u64 ctrl_bits)
12982{
12983 unsigned long timeout;
12984 u64 reg;
12985
12986 /* is the condition present? */
12987 reg = read_csr(dd, CCE_STATUS);
12988 if ((reg & status_bits) == 0)
12989 return;
12990
12991 /* clear the condition */
12992 write_csr(dd, CCE_CTRL, ctrl_bits);
12993
12994 /* wait for the condition to clear */
12995 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12996 while (1) {
12997 reg = read_csr(dd, CCE_STATUS);
12998 if ((reg & status_bits) == 0)
12999 return;
13000 if (time_after(jiffies, timeout)) {
13001 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013002 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13003 status_bits, reg & status_bits);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013004 return;
13005 }
13006 udelay(1);
13007 }
13008}
13009
13010/* set CCE CSRs to chip reset defaults */
13011static void reset_cce_csrs(struct hfi1_devdata *dd)
13012{
13013 int i;
13014
13015 /* CCE_REVISION read-only */
13016 /* CCE_REVISION2 read-only */
13017 /* CCE_CTRL - bits clear automatically */
13018 /* CCE_STATUS read-only, use CceCtrl to clear */
13019 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13020 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13021 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13022 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13023 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13024 /* CCE_ERR_STATUS read-only */
13025 write_csr(dd, CCE_ERR_MASK, 0);
13026 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13027 /* CCE_ERR_FORCE leave alone */
13028 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13029 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13030 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13031 /* CCE_PCIE_CTRL leave alone */
13032 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13033 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13034 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013035 CCE_MSIX_TABLE_UPPER_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013036 }
13037 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13038 /* CCE_MSIX_PBA read-only */
13039 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13040 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13041 }
13042 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13043 write_csr(dd, CCE_INT_MAP, 0);
13044 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13045 /* CCE_INT_STATUS read-only */
13046 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13047 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13048 /* CCE_INT_FORCE leave alone */
13049 /* CCE_INT_BLOCKED read-only */
13050 }
13051 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13052 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13053}
13054
Mike Marciniszyn77241052015-07-30 15:17:43 -040013055/* set MISC CSRs to chip reset defaults */
13056static void reset_misc_csrs(struct hfi1_devdata *dd)
13057{
13058 int i;
13059
13060 for (i = 0; i < 32; i++) {
13061 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13062 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13063 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13064 }
Jubin John4d114fd2016-02-14 20:21:43 -080013065 /*
13066 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13067	 * only be written in 128-byte chunks
13068 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013069 /* init RSA engine to clear lingering errors */
13070 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13071 write_csr(dd, MISC_CFG_RSA_MU, 0);
13072 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13073 /* MISC_STS_8051_DIGEST read-only */
13074 /* MISC_STS_SBM_DIGEST read-only */
13075 /* MISC_STS_PCIE_DIGEST read-only */
13076 /* MISC_STS_FAB_DIGEST read-only */
13077 /* MISC_ERR_STATUS read-only */
13078 write_csr(dd, MISC_ERR_MASK, 0);
13079 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13080 /* MISC_ERR_FORCE leave alone */
13081}
13082
13083/* set TXE CSRs to chip reset defaults */
13084static void reset_txe_csrs(struct hfi1_devdata *dd)
13085{
13086 int i;
13087
13088 /*
13089 * TXE Kernel CSRs
13090 */
13091 write_csr(dd, SEND_CTRL, 0);
13092 __cm_reset(dd, 0); /* reset CM internal state */
13093 /* SEND_CONTEXTS read-only */
13094 /* SEND_DMA_ENGINES read-only */
13095 /* SEND_PIO_MEM_SIZE read-only */
13096 /* SEND_DMA_MEM_SIZE read-only */
13097 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13098 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13099 /* SEND_PIO_ERR_STATUS read-only */
13100 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13101 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13102 /* SEND_PIO_ERR_FORCE leave alone */
13103 /* SEND_DMA_ERR_STATUS read-only */
13104 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13105 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13106 /* SEND_DMA_ERR_FORCE leave alone */
13107 /* SEND_EGRESS_ERR_STATUS read-only */
13108 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13109 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13110 /* SEND_EGRESS_ERR_FORCE leave alone */
13111 write_csr(dd, SEND_BTH_QP, 0);
13112 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13113 write_csr(dd, SEND_SC2VLT0, 0);
13114 write_csr(dd, SEND_SC2VLT1, 0);
13115 write_csr(dd, SEND_SC2VLT2, 0);
13116 write_csr(dd, SEND_SC2VLT3, 0);
13117 write_csr(dd, SEND_LEN_CHECK0, 0);
13118 write_csr(dd, SEND_LEN_CHECK1, 0);
13119 /* SEND_ERR_STATUS read-only */
13120 write_csr(dd, SEND_ERR_MASK, 0);
13121 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13122 /* SEND_ERR_FORCE read-only */
13123 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013124 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013125 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013126 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13127 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13128 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013129 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013130 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013131 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013132 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013133 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
Jubin John17fb4f22016-02-14 20:21:52 -080013134 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013135 /* SEND_CM_CREDIT_USED_STATUS read-only */
13136 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13137 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13138 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13139 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13140 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13141 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013142 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013143 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13144 /* SEND_CM_CREDIT_USED_VL read-only */
13145 /* SEND_CM_CREDIT_USED_VL15 read-only */
13146 /* SEND_EGRESS_CTXT_STATUS read-only */
13147 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13148 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13149 /* SEND_EGRESS_ERR_INFO read-only */
13150 /* SEND_EGRESS_ERR_SOURCE read-only */
13151
13152 /*
13153 * TXE Per-Context CSRs
13154 */
13155 for (i = 0; i < dd->chip_send_contexts; i++) {
13156 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13157 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13158 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13159 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13160 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13161 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13162 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13163 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13164 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13165 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13166 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13167 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13168 }
13169
13170 /*
13171 * TXE Per-SDMA CSRs
13172 */
13173 for (i = 0; i < dd->chip_sdma_engines; i++) {
13174 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13175 /* SEND_DMA_STATUS read-only */
13176 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13177 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13178 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13179 /* SEND_DMA_HEAD read-only */
13180 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13181 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13182 /* SEND_DMA_IDLE_CNT read-only */
13183 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13184 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13185 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13186 /* SEND_DMA_ENG_ERR_STATUS read-only */
13187 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13188 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13189 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13190 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13191 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13192 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13193 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13194 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13195 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13196 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13197 }
13198}
13199
13200/*
13201 * Expect on entry:
13202 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13203 */
13204static void init_rbufs(struct hfi1_devdata *dd)
13205{
13206 u64 reg;
13207 int count;
13208
13209 /*
13210 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13211 * clear.
13212 */
13213 count = 0;
13214 while (1) {
13215 reg = read_csr(dd, RCV_STATUS);
13216 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13217 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13218 break;
13219 /*
13220 * Give up after 1ms - maximum wait time.
13221 *
13222 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13223 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13224 * 148 KB / (66% * 250MB/s) = 920us
13225 */
13226 if (count++ > 500) {
13227 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013228 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13229 __func__, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013230 break;
13231 }
13232 udelay(2); /* do not busy-wait the CSR */
13233 }
13234
13235 /* start the init - expect RcvCtrl to be 0 */
13236 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13237
13238 /*
13239 * Read to force the write of Rcvtrl.RxRbufInit. There is a brief
13240 * period after the write before RcvStatus.RxRbufInitDone is valid.
13241 * The delay in the first run through the loop below is sufficient and
13242	 * required before the first read of RcvStatus.RxRbufInitDone.
13243 */
13244 read_csr(dd, RCV_CTRL);
13245
13246 /* wait for the init to finish */
13247 count = 0;
13248 while (1) {
13249 /* delay is required first time through - see above */
13250 udelay(2); /* do not busy-wait the CSR */
13251 reg = read_csr(dd, RCV_STATUS);
13252 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13253 break;
13254
13255 /* give up after 100us - slowest possible at 33MHz is 73us */
13256 if (count++ > 50) {
13257 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013258 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13259 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013260 break;
13261 }
13262 }
13263}
13264
13265/* set RXE CSRs to chip reset defaults */
13266static void reset_rxe_csrs(struct hfi1_devdata *dd)
13267{
13268 int i, j;
13269
13270 /*
13271 * RXE Kernel CSRs
13272 */
13273 write_csr(dd, RCV_CTRL, 0);
13274 init_rbufs(dd);
13275 /* RCV_STATUS read-only */
13276 /* RCV_CONTEXTS read-only */
13277 /* RCV_ARRAY_CNT read-only */
13278 /* RCV_BUF_SIZE read-only */
13279 write_csr(dd, RCV_BTH_QP, 0);
13280 write_csr(dd, RCV_MULTICAST, 0);
13281 write_csr(dd, RCV_BYPASS, 0);
13282 write_csr(dd, RCV_VL15, 0);
13283 /* this is a clear-down */
13284 write_csr(dd, RCV_ERR_INFO,
Jubin John17fb4f22016-02-14 20:21:52 -080013285 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013286 /* RCV_ERR_STATUS read-only */
13287 write_csr(dd, RCV_ERR_MASK, 0);
13288 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13289 /* RCV_ERR_FORCE leave alone */
13290 for (i = 0; i < 32; i++)
13291 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13292 for (i = 0; i < 4; i++)
13293 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13294 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13295 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13296 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13297 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13298 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13299 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13300 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13301 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13302 }
13303 for (i = 0; i < 32; i++)
13304 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13305
13306 /*
13307 * RXE Kernel and User Per-Context CSRs
13308 */
13309 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13310 /* kernel */
13311 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13312 /* RCV_CTXT_STATUS read-only */
13313 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13314 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13315 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13316 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13317 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13318 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13319 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13320 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13321 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13322 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13323
13324 /* user */
13325 /* RCV_HDR_TAIL read-only */
13326 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13327 /* RCV_EGR_INDEX_TAIL read-only */
13328 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13329 /* RCV_EGR_OFFSET_TAIL read-only */
13330 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
Jubin John17fb4f22016-02-14 20:21:52 -080013331 write_uctxt_csr(dd, i,
13332 RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013333 }
13334 }
13335}
13336
13337/*
13338 * Set sc2vl tables.
13339 *
13340 * They power on to zeros, so to avoid send context errors
13341 * they need to be set:
13342 *
13343 * SC 0-7 -> VL 0-7 (respectively)
13344 * SC 15 -> VL 15
13345 * otherwise
13346 * -> VL 0
13347 */
13348static void init_sc2vl_tables(struct hfi1_devdata *dd)
13349{
13350 int i;
13351 /* init per architecture spec, constrained by hardware capability */
13352
13353 /* HFI maps sent packets */
13354 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13355 0,
13356 0, 0, 1, 1,
13357 2, 2, 3, 3,
13358 4, 4, 5, 5,
13359 6, 6, 7, 7));
13360 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13361 1,
13362 8, 0, 9, 0,
13363 10, 0, 11, 0,
13364 12, 0, 13, 0,
13365 14, 0, 15, 15));
13366 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13367 2,
13368 16, 0, 17, 0,
13369 18, 0, 19, 0,
13370 20, 0, 21, 0,
13371 22, 0, 23, 0));
13372 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13373 3,
13374 24, 0, 25, 0,
13375 26, 0, 27, 0,
13376 28, 0, 29, 0,
13377 30, 0, 31, 0));
13378
13379 /* DC maps received packets */
13380 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13381 15_0,
13382 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13383 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13384 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13385 31_16,
13386 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13387 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13388
13389 /* initialize the cached sc2vl values consistently with h/w */
13390 for (i = 0; i < 32; i++) {
13391 if (i < 8 || i == 15)
13392 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13393 else
13394 *((u8 *)(dd->sc2vl) + i) = 0;
13395 }
13396}
13397
13398/*
13399 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13400 * depend on the chip going through a power-on reset - a driver may be loaded
13401 * and unloaded many times.
13402 *
13403 * Do not write any CSR values to the chip in this routine - there may be
13404 * a reset following the (possible) FLR in this routine.
13405 *
13406 */
13407static void init_chip(struct hfi1_devdata *dd)
13408{
13409 int i;
13410
13411 /*
13412 * Put the HFI CSRs in a known state.
13413 * Combine this with a DC reset.
13414 *
13415 * Stop the device from doing anything while we do a
13416 * reset. We know there are no other active users of
13417	 * reset. We know there are no other active users of
13418	 * the device since we are now in charge. Turn off
13419 * the device does not generate any interrupts.
13420 */
13421
13422 /* disable send contexts and SDMA engines */
13423 write_csr(dd, SEND_CTRL, 0);
13424 for (i = 0; i < dd->chip_send_contexts; i++)
13425 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13426 for (i = 0; i < dd->chip_sdma_engines; i++)
13427 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13428 /* disable port (turn off RXE inbound traffic) and contexts */
13429 write_csr(dd, RCV_CTRL, 0);
13430 for (i = 0; i < dd->chip_rcv_contexts; i++)
13431 write_csr(dd, RCV_CTXT_CTRL, 0);
13432 /* mask all interrupt sources */
13433 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013434 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013435
13436 /*
13437 * DC Reset: do a full DC reset before the register clear.
13438 * A recommended length of time to hold is one CSR read,
13439 * so reread the CceDcCtrl. Then, hold the DC in reset
13440 * across the clear.
13441 */
13442 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013443 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013444
13445 if (use_flr) {
13446 /*
13447 * A FLR will reset the SPC core and part of the PCIe.
13448 * The parts that need to be restored have already been
13449 * saved.
13450 */
13451 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13452
13453 /* do the FLR, the DC reset will remain */
13454 hfi1_pcie_flr(dd);
13455
13456 /* restore command and BARs */
13457 restore_pci_variables(dd);
13458
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013459 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013460 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13461 hfi1_pcie_flr(dd);
13462 restore_pci_variables(dd);
13463 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013464 } else {
13465 dd_dev_info(dd, "Resetting CSRs with writes\n");
13466 reset_cce_csrs(dd);
13467 reset_txe_csrs(dd);
13468 reset_rxe_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013469 reset_misc_csrs(dd);
13470 }
13471 /* clear the DC reset */
13472 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013473
Mike Marciniszyn77241052015-07-30 15:17:43 -040013474 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013475 setextled(dd, 0);
13476
Mike Marciniszyn77241052015-07-30 15:17:43 -040013477 /*
13478 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013479 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013480	 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, holding
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013481	 * anything plugged in constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013482 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013483 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013484 * I2CCLK and I2CDAT will change per direction, and INT_N and
13485 * MODPRS_N are input only and their value is ignored.
13486 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013487 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13488 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Dean Luicka2ee27a2016-03-05 08:49:50 -080013489 init_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013490}
13491
13492static void init_early_variables(struct hfi1_devdata *dd)
13493{
13494 int i;
13495
13496 /* assign link credit variables */
13497 dd->vau = CM_VAU;
13498 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013499 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013500 dd->link_credits--;
13501 dd->vcu = cu_to_vcu(hfi1_cu);
13502 /* enough room for 8 MAD packets plus header - 17K */
13503 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13504 if (dd->vl15_init > dd->link_credits)
13505 dd->vl15_init = dd->link_credits;
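	/*
	 * Sizing sketch with an assumed allocation unit: 8 * (2048 + 128)
	 * is 17408 bytes; if vau_to_au(dd->vau) were 64 bytes, vl15_init
	 * would be 272 credits before the clamp above.
	 */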
13506
13507 write_uninitialized_csrs_and_memories(dd);
13508
13509 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13510 for (i = 0; i < dd->num_pports; i++) {
13511 struct hfi1_pportdata *ppd = &dd->pport[i];
13512
13513 set_partition_keys(ppd);
13514 }
13515 init_sc2vl_tables(dd);
13516}
13517
13518static void init_kdeth_qp(struct hfi1_devdata *dd)
13519{
13520 /* user changed the KDETH_QP */
13521 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13522 /* out of range or illegal value */
13523 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13524 kdeth_qp = 0;
13525 }
13526 if (kdeth_qp == 0) /* not set, or failed range check */
13527 kdeth_qp = DEFAULT_KDETH_QP;
13528
13529 write_csr(dd, SEND_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013530 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13531 SEND_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013532
13533 write_csr(dd, RCV_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013534 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13535 RCV_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013536}
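/*
 * Effect sketch (assumes the shift constants place the prefix in the
 * most significant byte of the 24-bit QPN): with a prefix value of
 * 0x80, every QP number of the form 0x80xxxx is treated as a KDETH
 * queue pair by both the send and receive sides. The 0x80 value is
 * only an example; the actual default is DEFAULT_KDETH_QP.
 */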
13537
13538/**
13539 * init_qpmap_table
13540 * @dd - device data
13541 * @first_ctxt - first context
13542	 * @last_ctxt - last context
13543 *
13544	 * This routine sets the qpn mapping table that
13545 * is indexed by qpn[8:1].
13546 *
13547 * The routine will round robin the 256 settings
13548 * from first_ctxt to last_ctxt.
13549 *
13550 * The first/last looks ahead to having specialized
13551 * receive contexts for mgmt and bypass. Normal
13552	 * verbs traffic is assumed to be on a range
13553 * of receive contexts.
13554 */
13555static void init_qpmap_table(struct hfi1_devdata *dd,
13556 u32 first_ctxt,
13557 u32 last_ctxt)
13558{
13559 u64 reg = 0;
13560 u64 regno = RCV_QP_MAP_TABLE;
13561 int i;
13562 u64 ctxt = first_ctxt;
13563
Dean Luick60d585ad2016-04-12 10:50:35 -070013564 for (i = 0; i < 256; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013565 reg |= ctxt << (8 * (i % 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -040013566 ctxt++;
13567 if (ctxt > last_ctxt)
13568 ctxt = first_ctxt;
Dean Luick60d585ad2016-04-12 10:50:35 -070013569 if (i % 8 == 7) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013570 write_csr(dd, regno, reg);
13571 reg = 0;
13572 regno += 8;
13573 }
13574 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013575
13576 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13577 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13578}
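/*
 * Mapping example (illustrative): with first_ctxt == 1 and
 * last_ctxt == 4, the 256 table slots cycle 1, 2, 3, 4, 1, 2, ... so
 * qpn[8:1] values are spread round robin over those four receive
 * contexts, eight 8-bit entries per RCV_QP_MAP_TABLE register.
 */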
13579
Dean Luick372cc85a2016-04-12 11:30:51 -070013580struct rsm_map_table {
13581 u64 map[NUM_MAP_REGS];
13582 unsigned int used;
13583};
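/*
 * Layout note (assumption about the constants): map[] mirrors the
 * RCV_RSM_MAP_TABLE CSRs, eight 8-bit receive-context entries per
 * 64-bit register, so NUM_MAP_REGS * 8 == NUM_MAP_ENTRIES addressable
 * slots; 'used' tracks how many slots have been handed out so the QOS
 * and FECN rules do not overlap.
 */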
13584
Dean Luickb12349a2016-04-12 11:31:33 -070013585struct rsm_rule_data {
13586 u8 offset;
13587 u8 pkt_type;
13588 u32 field1_off;
13589 u32 field2_off;
13590 u32 index1_off;
13591 u32 index1_width;
13592 u32 index2_off;
13593 u32 index2_width;
13594 u32 mask1;
13595 u32 value1;
13596 u32 mask2;
13597 u32 value2;
13598};
13599
Dean Luick372cc85a2016-04-12 11:30:51 -070013600/*
13601 * Return an initialized RMT map table for users to fill in. OK if it
13602 * returns NULL, indicating no table.
13603 */
13604static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13605{
13606 struct rsm_map_table *rmt;
13607 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13608
13609 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13610 if (rmt) {
13611 memset(rmt->map, rxcontext, sizeof(rmt->map));
13612 rmt->used = 0;
13613 }
13614
13615 return rmt;
13616}
13617
13618/*
13619 * Write the final RMT map table to the chip and free the table. OK if
13620 * table is NULL.
13621 */
13622static void complete_rsm_map_table(struct hfi1_devdata *dd,
13623 struct rsm_map_table *rmt)
13624{
13625 int i;
13626
13627 if (rmt) {
13628 /* write table to chip */
13629 for (i = 0; i < NUM_MAP_REGS; i++)
13630 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13631
13632 /* enable RSM */
13633 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13634 }
13635}
13636
Dean Luickb12349a2016-04-12 11:31:33 -070013637/*
13638 * Add a receive side mapping rule.
13639 */
13640static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13641 struct rsm_rule_data *rrd)
13642{
13643 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13644 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13645 1ull << rule_index | /* enable bit */
13646 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13647 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13648 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13649 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13650 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13651 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13652 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13653 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13654 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13655 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13656 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13657 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13658 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13659}
13660
Dean Luick4a818be2016-04-12 11:31:11 -070013661/* return the number of RSM map table entries that will be used for QOS */
13662static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13663 unsigned int *np)
13664{
13665 int i;
13666 unsigned int m, n;
13667 u8 max_by_vl = 0;
13668
13669 /* is QOS active at all? */
13670 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13671 num_vls == 1 ||
13672 krcvqsset <= 1)
13673 goto no_qos;
13674
13675 /* determine bits for qpn */
13676 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13677 if (krcvqs[i] > max_by_vl)
13678 max_by_vl = krcvqs[i];
13679 if (max_by_vl > 32)
13680 goto no_qos;
13681 m = ilog2(__roundup_pow_of_two(max_by_vl));
13682
13683 /* determine bits for vl */
13684 n = ilog2(__roundup_pow_of_two(num_vls));
13685
13686 /* reject if too much is used */
13687 if ((m + n) > 7)
13688 goto no_qos;
13689
13690 if (mp)
13691 *mp = m;
13692 if (np)
13693 *np = n;
13694
13695 return 1 << (m + n);
13696
13697no_qos:
13698 if (mp)
13699 *mp = 0;
13700 if (np)
13701 *np = 0;
13702 return 0;
13703}
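/*
 * Worked example with hypothetical module parameters: num_vls == 4 and
 * krcvqs[] == {2, 2, 2, 2} give max_by_vl = 2, m = 1 and n = 2, so the
 * routine reports 1 << (1 + 2) = 8 RSM map table entries for QOS.
 */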
13704
Mike Marciniszyn77241052015-07-30 15:17:43 -040013705/**
13706 * init_qos - init RX qos
13707 * @dd - device data
Dean Luick372cc85a2016-04-12 11:30:51 -070013708 * @rmt - RSM map table
Mike Marciniszyn77241052015-07-30 15:17:43 -040013709 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013710 * This routine initializes Rule 0 and the RSM map table to implement
13711 * quality of service (qos).
Mike Marciniszyn77241052015-07-30 15:17:43 -040013712 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013713 * If all of the limit tests succeed, qos is applied based on the array
13714 * interpretation of krcvqs where entry 0 is VL0.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013715 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013716 * The number of vl bits (n) and the number of qpn bits (m) are computed to
13717 * feed both the RSM map table and the single rule.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013718 */
Dean Luick372cc85a2016-04-12 11:30:51 -070013719static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013720{
Dean Luickb12349a2016-04-12 11:31:33 -070013721 struct rsm_rule_data rrd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013722 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
Dean Luick372cc85a2016-04-12 11:30:51 -070013723 unsigned int rmt_entries;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013724 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013725
Dean Luick4a818be2016-04-12 11:31:11 -070013726 if (!rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013727 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013728 rmt_entries = qos_rmt_entries(dd, &m, &n);
13729 if (rmt_entries == 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013730 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013731 qpns_per_vl = 1 << m;
13732
Dean Luick372cc85a2016-04-12 11:30:51 -070013733 /* enough room in the map table? */
13734 rmt_entries = 1 << (m + n);
13735 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013736 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013737
Dean Luick372cc85a2016-04-12 11:30:51 -070013738	/* add qos entries to the RSM map table */
Dean Luick33a9eb52016-04-12 10:50:22 -070013739 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013740 unsigned tctxt;
13741
13742 for (qpn = 0, tctxt = ctxt;
13743 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13744 unsigned idx, regoff, regidx;
13745
Dean Luick372cc85a2016-04-12 11:30:51 -070013746 /* generate the index the hardware will produce */
13747 idx = rmt->used + ((qpn << n) ^ i);
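			/*
			 * Illustration with a hypothetical n = 1 (two VLs) and
			 * rmt->used = 0: VL0 fills the even map entries
			 * (0, 2, 4, ...) and VL1 the odd ones (1, 3, 5, ...),
			 * each cycling through that VL's kernel contexts.
			 */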
Mike Marciniszyn77241052015-07-30 15:17:43 -040013748 regoff = (idx % 8) * 8;
13749 regidx = idx / 8;
Dean Luick372cc85a2016-04-12 11:30:51 -070013750 /* replace default with context number */
13751 reg = rmt->map[regidx];
Mike Marciniszyn77241052015-07-30 15:17:43 -040013752 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13753 << regoff);
13754 reg |= (u64)(tctxt++) << regoff;
Dean Luick372cc85a2016-04-12 11:30:51 -070013755 rmt->map[regidx] = reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013756 if (tctxt == ctxt + krcvqs[i])
13757 tctxt = ctxt;
13758 }
13759 ctxt += krcvqs[i];
13760 }
Dean Luickb12349a2016-04-12 11:31:33 -070013761
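	/*
	 * Descriptive summary of the rule programmed below (an
	 * interpretation of the field assignments, not taken from the
	 * hardware spec): rule 0 matches on the LRH/BTH and SC fields and
	 * selects n bits at the SC offset plus m + n bits at the QPN
	 * offset, indexing the map entries written above in their
	 * (qpn << n) ^ vl layout.
	 */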
13762 rrd.offset = rmt->used;
13763 rrd.pkt_type = 2;
13764 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13765 rrd.field2_off = LRH_SC_MATCH_OFFSET;
13766 rrd.index1_off = LRH_SC_SELECT_OFFSET;
13767 rrd.index1_width = n;
13768 rrd.index2_off = QPN_SELECT_OFFSET;
13769 rrd.index2_width = m + n;
13770 rrd.mask1 = LRH_BTH_MASK;
13771 rrd.value1 = LRH_BTH_VALUE;
13772 rrd.mask2 = LRH_SC_MASK;
13773 rrd.value2 = LRH_SC_VALUE;
13774
13775 /* add rule 0 */
13776 add_rsm_rule(dd, 0, &rrd);
13777
Dean Luick372cc85a2016-04-12 11:30:51 -070013778 /* mark RSM map entries as used */
13779 rmt->used += rmt_entries;
Dean Luick33a9eb52016-04-12 10:50:22 -070013780 /* map everything else to the mcast/err/vl15 context */
13781 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013782 dd->qos_shift = n + 1;
13783 return;
13784bail:
13785 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013786 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013787}
13788
Dean Luick8f000f72016-04-12 11:32:06 -070013789static void init_user_fecn_handling(struct hfi1_devdata *dd,
13790 struct rsm_map_table *rmt)
13791{
13792 struct rsm_rule_data rrd;
13793 u64 reg;
13794 int i, idx, regoff, regidx;
13795 u8 offset;
13796
13797 /* there needs to be enough room in the map table */
13798 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13799 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13800 return;
13801 }
13802
13803 /*
13804 * RSM will extract the destination context as an index into the
13805 * map table. The destination contexts are a sequential block
13806 * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
13807 * Map entries are accessed as offset + extracted value. Adjust
13808 * the added offset so this sequence can be placed anywhere in
13809 * the table - as long as the entries themselves do not wrap.
13810 * There are only enough bits in offset for the table size, so
13811 * start with that to allow for a "negative" offset.
13812 */
13813 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13814 (int)dd->first_user_ctxt);
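	/*
	 * Worked example with hypothetical sizes, assuming a 256-entry map
	 * table: rmt->used = 20 and first_user_ctxt = 24 give
	 * offset = (u8)(256 + 20 - 24) = 252, so context 24 lands on map
	 * entry (252 + 24) % 256 = 20, context 25 on entry 21, and so on.
	 */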
13815
13816 for (i = dd->first_user_ctxt, idx = rmt->used;
13817 i < dd->num_rcv_contexts; i++, idx++) {
13818 /* replace with identity mapping */
13819 regoff = (idx % 8) * 8;
13820 regidx = idx / 8;
13821 reg = rmt->map[regidx];
13822 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13823 reg |= (u64)i << regoff;
13824 rmt->map[regidx] = reg;
13825 }
13826
13827 /*
13828 * For RSM intercept of Expected FECN packets:
13829 * o packet type 0 - expected
13830 * o match on F (bit 95), using select/match 1, and
13831 * o match on SH (bit 133), using select/match 2.
13832 *
13833 * Use index 1 to extract the 8-bit receive context from DestQP
13834 * (start at bit 64). Use that as the RSM map table index.
13835 */
13836 rrd.offset = offset;
13837 rrd.pkt_type = 0;
13838 rrd.field1_off = 95;
13839 rrd.field2_off = 133;
13840 rrd.index1_off = 64;
13841 rrd.index1_width = 8;
13842 rrd.index2_off = 0;
13843 rrd.index2_width = 0;
13844 rrd.mask1 = 1;
13845 rrd.value1 = 1;
13846 rrd.mask2 = 1;
13847 rrd.value2 = 1;
13848
13849 /* add rule 1 */
13850 add_rsm_rule(dd, 1, &rrd);
13851
13852 rmt->used += dd->num_user_contexts;
13853}
13854
Mike Marciniszyn77241052015-07-30 15:17:43 -040013855static void init_rxe(struct hfi1_devdata *dd)
13856{
Dean Luick372cc85a2016-04-12 11:30:51 -070013857 struct rsm_map_table *rmt;
13858
Mike Marciniszyn77241052015-07-30 15:17:43 -040013859 /* enable all receive errors */
13860 write_csr(dd, RCV_ERR_MASK, ~0ull);
Dean Luick372cc85a2016-04-12 11:30:51 -070013861
13862 rmt = alloc_rsm_map_table(dd);
13863 /* set up QOS, including the QPN map table */
13864 init_qos(dd, rmt);
Dean Luick8f000f72016-04-12 11:32:06 -070013865 init_user_fecn_handling(dd, rmt);
Dean Luick372cc85a2016-04-12 11:30:51 -070013866 complete_rsm_map_table(dd, rmt);
13867 kfree(rmt);
13868
Mike Marciniszyn77241052015-07-30 15:17:43 -040013869 /*
13870 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13871 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13872 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13873 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13874 * Max_Payload_Size set to its minimum of 128.
13875 *
13876 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13877 * (64 bytes). Max_Payload_Size is possibly modified upward in
13878 * tune_pcie_caps() which is called after this routine.
13879 */
13880}
13881
13882static void init_other(struct hfi1_devdata *dd)
13883{
13884 /* enable all CCE errors */
13885 write_csr(dd, CCE_ERR_MASK, ~0ull);
13886 /* enable *some* Misc errors */
13887 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13888 /* enable all DC errors, except LCB */
13889 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13890 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13891}
13892
13893/*
13894 * Fill out the given AU table using the given CU. A CU is defined in terms
13895 * of AUs. The table is an encoding: given the index, how many AUs does that
13896 * represent?
13897 *
13898 * NOTE: Assumes that the register layout is the same for the
13899 * local and remote tables.
13900 */
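/*
 * For illustration, with the values programmed below indices 0-7 encode
 * 0, 1, 2*CU, 4*CU, 8*CU, 16*CU, 32*CU and 64*CU AUs respectively (so a
 * hypothetical CU of 1 AU yields the table 0, 1, 2, 4, ..., 64).
 */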
13901static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13902 u32 csr0to3, u32 csr4to7)
13903{
13904 write_csr(dd, csr0to3,
Jubin John17fb4f22016-02-14 20:21:52 -080013905 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13906 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13907 2ull * cu <<
13908 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13909 4ull * cu <<
13910 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013911 write_csr(dd, csr4to7,
Jubin John17fb4f22016-02-14 20:21:52 -080013912 8ull * cu <<
13913 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13914 16ull * cu <<
13915 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13916 32ull * cu <<
13917 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13918 64ull * cu <<
13919 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013920}
13921
13922static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13923{
13924 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080013925 SEND_CM_LOCAL_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013926}
13927
13928void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13929{
13930 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080013931 SEND_CM_REMOTE_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013932}
13933
13934static void init_txe(struct hfi1_devdata *dd)
13935{
13936 int i;
13937
13938 /* enable all PIO, SDMA, general, and Egress errors */
13939 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13940 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13941 write_csr(dd, SEND_ERR_MASK, ~0ull);
13942 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13943
13944 /* enable all per-context and per-SDMA engine errors */
13945 for (i = 0; i < dd->chip_send_contexts; i++)
13946 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13947 for (i = 0; i < dd->chip_sdma_engines; i++)
13948 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13949
13950 /* set the local CU to AU mapping */
13951 assign_local_cm_au_table(dd, dd->vcu);
13952
13953 /*
13954 * Set reasonable default for Credit Return Timer
13955 * Don't set on Simulator - causes it to choke.
13956 */
13957 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13958 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13959}
13960
13961int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13962{
13963 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13964 unsigned sctxt;
13965 int ret = 0;
13966 u64 reg;
13967
13968 if (!rcd || !rcd->sc) {
13969 ret = -EINVAL;
13970 goto done;
13971 }
13972 sctxt = rcd->sc->hw_context;
13973 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13974 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13975 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13976 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13977 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13978 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13979 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13980 /*
13981 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040013982 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013983 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013984 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13985 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13986 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13987 }
13988
13989 /* Enable J_KEY check on receive context. */
13990 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13991 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13992 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13993 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13994done:
13995 return ret;
13996}
13997
13998int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13999{
14000 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14001 unsigned sctxt;
14002 int ret = 0;
14003 u64 reg;
14004
14005 if (!rcd || !rcd->sc) {
14006 ret = -EINVAL;
14007 goto done;
14008 }
14009 sctxt = rcd->sc->hw_context;
14010 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14011 /*
14012 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14013 * This check would not have been enabled for A0 h/w, see
14014 * set_ctxt_jkey().
14015 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014016 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014017 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14018 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14019 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14020 }
14021 /* Turn off the J_KEY on the receive side */
14022 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14023done:
14024 return ret;
14025}
14026
14027int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14028{
14029 struct hfi1_ctxtdata *rcd;
14030 unsigned sctxt;
14031 int ret = 0;
14032 u64 reg;
14033
Jubin Johne4909742016-02-14 20:22:00 -080014034 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014035 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014036 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014037 ret = -EINVAL;
14038 goto done;
14039 }
14040 if (!rcd || !rcd->sc) {
14041 ret = -EINVAL;
14042 goto done;
14043 }
14044 sctxt = rcd->sc->hw_context;
14045 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14046 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14047 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14048 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14049 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Sebastian Sancheze38d1e42016-04-12 11:22:21 -070014050 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014051 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14052done:
14053 return ret;
14054}
14055
14056int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14057{
14058 struct hfi1_ctxtdata *rcd;
14059 unsigned sctxt;
14060 int ret = 0;
14061 u64 reg;
14062
Jubin Johne4909742016-02-14 20:22:00 -080014063 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014064 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014065 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014066 ret = -EINVAL;
14067 goto done;
14068 }
14069 if (!rcd || !rcd->sc) {
14070 ret = -EINVAL;
14071 goto done;
14072 }
14073 sctxt = rcd->sc->hw_context;
14074 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14075 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14076 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14077 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14078done:
14079 return ret;
14080}
14081
14082/*
14083 * Start cleaning up the chip. Our clean up happens in multiple
14084 * stages and this is just the first.
14085 */
14086void hfi1_start_cleanup(struct hfi1_devdata *dd)
14087{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080014088 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014089 free_cntrs(dd);
14090 free_rcverr(dd);
14091 clean_up_interrupts(dd);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014092 finish_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014093}
14094
14095#define HFI_BASE_GUID(dev) \
14096 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
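/*
 * The two HFIs on an ASIC are assumed to have GUIDs differing only in the
 * bit at GUID_HFI_INDEX_SHIFT; masking it out gives a per-ASIC value that
 * init_asic_data() below uses to find the peer device.
 */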
14097
14098/*
Dean Luick78eb1292016-03-05 08:49:45 -080014099 * Information can be shared between the two HFIs on the same ASIC
14100 * in the same OS. This function finds the peer device and sets
14101 * up a shared structure.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014102 */
Dean Luick78eb1292016-03-05 08:49:45 -080014103static int init_asic_data(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014104{
14105 unsigned long flags;
14106 struct hfi1_devdata *tmp, *peer = NULL;
Dean Luick78eb1292016-03-05 08:49:45 -080014107 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014108
14109 spin_lock_irqsave(&hfi1_devs_lock, flags);
14110 /* Find our peer device */
14111 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14112 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14113 dd->unit != tmp->unit) {
14114 peer = tmp;
14115 break;
14116 }
14117 }
14118
Dean Luick78eb1292016-03-05 08:49:45 -080014119 if (peer) {
14120 dd->asic_data = peer->asic_data;
14121 } else {
14122 dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14123 if (!dd->asic_data) {
14124 ret = -ENOMEM;
14125 goto done;
14126 }
14127 mutex_init(&dd->asic_data->asic_resource_mutex);
14128 }
14129 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14130
14131done:
Mike Marciniszyn77241052015-07-30 15:17:43 -040014132 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
Dean Luick78eb1292016-03-05 08:49:45 -080014133 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014134}
14135
Dean Luick5d9157a2015-11-16 21:59:34 -050014136/*
14137 * Set dd->boardname. Use a generic name if a name is not returned from
14138 * EFI variable space.
14139 *
14140 * Return 0 on success, -ENOMEM if space could not be allocated.
14141 */
14142static int obtain_boardname(struct hfi1_devdata *dd)
14143{
14144 /* generic board description */
14145 const char generic[] =
14146 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14147 unsigned long size;
14148 int ret;
14149
14150 ret = read_hfi1_efi_var(dd, "description", &size,
14151 (void **)&dd->boardname);
14152 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014153 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014154 /* use generic description */
14155 dd->boardname = kstrdup(generic, GFP_KERNEL);
14156 if (!dd->boardname)
14157 return -ENOMEM;
14158 }
14159 return 0;
14160}
14161
Kaike Wan24487dd2016-02-26 13:33:23 -080014162/*
14163 * Check the interrupt registers to make sure that they are mapped correctly.
14164 * It is intended to help the user identify any mismapping by the VMM when
14165 * the driver is running in a VM. This function should only be called before
14166 * interrupts are set up properly.
14167 *
14168 * Return 0 on success, -EINVAL on failure.
14169 */
14170static int check_int_registers(struct hfi1_devdata *dd)
14171{
14172 u64 reg;
14173 u64 all_bits = ~(u64)0;
14174 u64 mask;
14175
14176 /* Clear CceIntMask[0] to avoid raising any interrupts */
14177 mask = read_csr(dd, CCE_INT_MASK);
14178 write_csr(dd, CCE_INT_MASK, 0ull);
14179 reg = read_csr(dd, CCE_INT_MASK);
14180 if (reg)
14181 goto err_exit;
14182
14183 /* Clear all interrupt status bits */
14184 write_csr(dd, CCE_INT_CLEAR, all_bits);
14185 reg = read_csr(dd, CCE_INT_STATUS);
14186 if (reg)
14187 goto err_exit;
14188
14189 /* Set all interrupt status bits */
14190 write_csr(dd, CCE_INT_FORCE, all_bits);
14191 reg = read_csr(dd, CCE_INT_STATUS);
14192 if (reg != all_bits)
14193 goto err_exit;
14194
14195 /* Restore the interrupt mask */
14196 write_csr(dd, CCE_INT_CLEAR, all_bits);
14197 write_csr(dd, CCE_INT_MASK, mask);
14198
14199 return 0;
14200err_exit:
14201 write_csr(dd, CCE_INT_MASK, mask);
14202 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14203 return -EINVAL;
14204}
14205
Mike Marciniszyn77241052015-07-30 15:17:43 -040014206/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014207 * Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014208 * @dev: the pci_dev for hfi1_ib device
14209 * @ent: pci_device_id struct for this dev
14210 *
14211 * Also allocates, initializes, and returns the devdata struct for this
14212 * device instance
14213 *
14214 * This is global, and is called directly at init to set up the
14215 * chip-specific function pointers for later use.
14216 */
14217struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14218 const struct pci_device_id *ent)
14219{
14220 struct hfi1_devdata *dd;
14221 struct hfi1_pportdata *ppd;
14222 u64 reg;
14223 int i, ret;
14224 static const char * const inames[] = { /* implementation names */
14225 "RTL silicon",
14226 "RTL VCS simulation",
14227 "RTL FPGA emulation",
14228 "Functional simulator"
14229 };
Kaike Wan24487dd2016-02-26 13:33:23 -080014230 struct pci_dev *parent = pdev->bus->self;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014231
Jubin John17fb4f22016-02-14 20:21:52 -080014232 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14233 sizeof(struct hfi1_pportdata));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014234 if (IS_ERR(dd))
14235 goto bail;
14236 ppd = dd->pport;
14237 for (i = 0; i < dd->num_pports; i++, ppd++) {
14238 int vl;
14239 /* init common fields */
14240 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14241 /* DC supports 4 link widths */
14242 ppd->link_width_supported =
14243 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14244 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14245 ppd->link_width_downgrade_supported =
14246 ppd->link_width_supported;
14247 /* start out enabling only 4X */
14248 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14249 ppd->link_width_downgrade_enabled =
14250 ppd->link_width_downgrade_supported;
14251 /* link width active is 0 when link is down */
14252 /* link width downgrade active is 0 when link is down */
14253
Jubin Johnd0d236e2016-02-14 20:20:15 -080014254 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14255 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014256 hfi1_early_err(&pdev->dev,
14257 "Invalid num_vls %u, using %u VLs\n",
14258 num_vls, HFI1_MAX_VLS_SUPPORTED);
14259 num_vls = HFI1_MAX_VLS_SUPPORTED;
14260 }
14261 ppd->vls_supported = num_vls;
14262 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014263 ppd->actual_vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014264 /* Set the default MTU. */
14265 for (vl = 0; vl < num_vls; vl++)
14266 dd->vld[vl].mtu = hfi1_max_mtu;
14267 dd->vld[15].mtu = MAX_MAD_PACKET;
14268 /*
14269 * Set the initial values to reasonable default, will be set
14270 * for real when link is up.
14271 */
14272 ppd->lstate = IB_PORT_DOWN;
14273 ppd->overrun_threshold = 0x4;
14274 ppd->phy_error_threshold = 0xf;
14275 ppd->port_crc_mode_enabled = link_crc_mask;
14276 /* initialize supported LTP CRC mode */
14277 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14278 /* initialize enabled LTP CRC mode */
14279 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14280 /* start in offline */
14281 ppd->host_link_state = HLS_DN_OFFLINE;
14282 init_vl_arb_caches(ppd);
Dean Luickf45c8dc2016-02-03 14:35:31 -080014283 ppd->last_pstate = 0xff; /* invalid value */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014284 }
14285
14286 dd->link_default = HLS_DN_POLL;
14287
14288 /*
14289 * Do remaining PCIe setup and save PCIe values in dd.
14290 * Any error printing is already done by the init code.
14291 * On return, we have the chip mapped.
14292 */
14293 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14294 if (ret < 0)
14295 goto bail_free;
14296
14297 /* verify that reads actually work, save revision for reset check */
14298 dd->revision = read_csr(dd, CCE_REVISION);
14299 if (dd->revision == ~(u64)0) {
14300 dd_dev_err(dd, "cannot read chip CSRs\n");
14301 ret = -EINVAL;
14302 goto bail_cleanup;
14303 }
14304 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14305 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14306 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14307 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14308
Jubin John4d114fd2016-02-14 20:21:43 -080014309 /*
Kaike Wan24487dd2016-02-26 13:33:23 -080014310 * Check interrupt registers mapping if the driver has no access to
14311 * the upstream component. In this case, it is likely that the driver
14312 * is running in a VM.
14313 */
14314 if (!parent) {
14315 ret = check_int_registers(dd);
14316 if (ret)
14317 goto bail_cleanup;
14318 }
14319
14320 /*
Jubin John4d114fd2016-02-14 20:21:43 -080014321 * obtain the hardware ID - NOT related to unit, which is a
14322 * software enumeration
14323 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014324 reg = read_csr(dd, CCE_REVISION2);
14325 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14326 & CCE_REVISION2_HFI_ID_MASK;
14327 /* the variable size will remove unwanted bits */
14328 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14329 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14330 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080014331 dd->icode < ARRAY_SIZE(inames) ?
14332 inames[dd->icode] : "unknown", (int)dd->irev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014333
14334 /* speeds the hardware can support */
14335 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14336 /* speeds allowed to run at */
14337 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14338 /* give a reasonable active value, will be set on link up */
14339 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14340
14341 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14342 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14343 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14344 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14345 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14346 /* fix up link widths for emulation _p */
14347 ppd = dd->pport;
14348 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14349 ppd->link_width_supported =
14350 ppd->link_width_enabled =
14351 ppd->link_width_downgrade_supported =
14352 ppd->link_width_downgrade_enabled =
14353 OPA_LINK_WIDTH_1X;
14354 }
14355	/* ensure num_vls isn't larger than the number of sdma engines */
14356 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14357 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014358 num_vls, dd->chip_sdma_engines);
14359 num_vls = dd->chip_sdma_engines;
14360 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014361 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014362 }
14363
14364 /*
14365 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14366 * Limit the max if larger than the field holds. If timeout is
14367 * non-zero, then the calculated field will be at least 1.
14368 *
14369 * Must be after icode is set up - the cclock rate depends
14370 * on knowing the hardware being used.
14371 */
14372 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14373 if (dd->rcv_intr_timeout_csr >
14374 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14375 dd->rcv_intr_timeout_csr =
14376 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14377 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14378 dd->rcv_intr_timeout_csr = 1;
14379
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014380 /* needs to be done before we look for the peer device */
14381 read_guid(dd);
14382
Dean Luick78eb1292016-03-05 08:49:45 -080014383 /* set up shared ASIC data with peer device */
14384 ret = init_asic_data(dd);
14385 if (ret)
14386 goto bail_cleanup;
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014387
Mike Marciniszyn77241052015-07-30 15:17:43 -040014388 /* obtain chip sizes, reset chip CSRs */
14389 init_chip(dd);
14390
14391 /* read in the PCIe link speed information */
14392 ret = pcie_speeds(dd);
14393 if (ret)
14394 goto bail_cleanup;
14395
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014396 /* Needs to be called before hfi1_firmware_init */
14397 get_platform_config(dd);
14398
Mike Marciniszyn77241052015-07-30 15:17:43 -040014399 /* read in firmware */
14400 ret = hfi1_firmware_init(dd);
14401 if (ret)
14402 goto bail_cleanup;
14403
14404 /*
14405 * In general, the PCIe Gen3 transition must occur after the
14406 * chip has been idled (so it won't initiate any PCIe transactions
14407 * e.g. an interrupt) and before the driver changes any registers
14408 * (the transition will reset the registers).
14409 *
14410 * In particular, place this call after:
14411 * - init_chip() - the chip will not initiate any PCIe transactions
14412 * - pcie_speeds() - reads the current link speed
14413 * - hfi1_firmware_init() - the needed firmware is ready to be
14414 * downloaded
14415 */
14416 ret = do_pcie_gen3_transition(dd);
14417 if (ret)
14418 goto bail_cleanup;
14419
14420 /* start setting dd values and adjusting CSRs */
14421 init_early_variables(dd);
14422
14423 parse_platform_config(dd);
14424
Dean Luick5d9157a2015-11-16 21:59:34 -050014425 ret = obtain_boardname(dd);
14426 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014427 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014428
14429 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014430 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014431 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014432 (u32)dd->majrev,
14433 (u32)dd->minrev,
14434 (dd->revision >> CCE_REVISION_SW_SHIFT)
14435 & CCE_REVISION_SW_MASK);
14436
Jubin John0852d242016-04-12 11:30:08 -070014437 /*
14438 * The real cpu mask is part of the affinity struct but has to be
14439 * initialized earlier than the rest of the affinity struct because it
14440 * is needed to calculate the number of user contexts in
14441 * set_up_context_variables(). However, hfi1_dev_affinity_init(),
14442 * which initializes the rest of the affinity struct members,
14443 * depends on set_up_context_variables() for the number of kernel
14444 * contexts, so it cannot be called before set_up_context_variables().
14445 */
14446 ret = init_real_cpu_mask(dd);
14447 if (ret)
14448 goto bail_cleanup;
14449
Mike Marciniszyn77241052015-07-30 15:17:43 -040014450 ret = set_up_context_variables(dd);
14451 if (ret)
14452 goto bail_cleanup;
14453
14454 /* set initial RXE CSRs */
14455 init_rxe(dd);
14456 /* set initial TXE CSRs */
14457 init_txe(dd);
14458 /* set initial non-RXE, non-TXE CSRs */
14459 init_other(dd);
14460 /* set up KDETH QP prefix in both RX and TX CSRs */
14461 init_kdeth_qp(dd);
14462
Jubin John0852d242016-04-12 11:30:08 -070014463 hfi1_dev_affinity_init(dd);
Mitko Haralanov957558c2016-02-03 14:33:40 -080014464
Mike Marciniszyn77241052015-07-30 15:17:43 -040014465 /* send contexts must be set up before receive contexts */
14466 ret = init_send_contexts(dd);
14467 if (ret)
14468 goto bail_cleanup;
14469
14470 ret = hfi1_create_ctxts(dd);
14471 if (ret)
14472 goto bail_cleanup;
14473
14474 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14475 /*
14476 * rcd[0] is guaranteed to be valid by this point. Also, all
14477 * contexts are using the same value, as per the module parameter.
14478 */
14479 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14480
14481 ret = init_pervl_scs(dd);
14482 if (ret)
14483 goto bail_cleanup;
14484
14485 /* sdma init */
14486 for (i = 0; i < dd->num_pports; ++i) {
14487 ret = sdma_init(dd, i);
14488 if (ret)
14489 goto bail_cleanup;
14490 }
14491
14492 /* use contexts created by hfi1_create_ctxts */
14493 ret = set_up_interrupts(dd);
14494 if (ret)
14495 goto bail_cleanup;
14496
14497 /* set up LCB access - must be after set_up_interrupts() */
14498 init_lcb_access(dd);
14499
14500 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14501 dd->base_guid & 0xFFFFFF);
14502
14503 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14504 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14505 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14506
14507 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14508 if (ret)
14509 goto bail_clear_intr;
14510 check_fabric_firmware_versions(dd);
14511
14512 thermal_init(dd);
14513
14514 ret = init_cntrs(dd);
14515 if (ret)
14516 goto bail_clear_intr;
14517
14518 ret = init_rcverr(dd);
14519 if (ret)
14520 goto bail_free_cntrs;
14521
14522 ret = eprom_init(dd);
14523 if (ret)
14524 goto bail_free_rcverr;
14525
14526 goto bail;
14527
14528bail_free_rcverr:
14529 free_rcverr(dd);
14530bail_free_cntrs:
14531 free_cntrs(dd);
14532bail_clear_intr:
14533 clean_up_interrupts(dd);
14534bail_cleanup:
14535 hfi1_pcie_ddcleanup(dd);
14536bail_free:
14537 hfi1_free_devdata(dd);
14538 dd = ERR_PTR(ret);
14539bail:
14540 return dd;
14541}
14542
14543static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14544 u32 dw_len)
14545{
14546 u32 delta_cycles;
14547 u32 current_egress_rate = ppd->current_egress_rate;
14548 /* rates here are in units of 10^6 bits/sec */
14549
14550 if (desired_egress_rate == -1)
14551 return 0; /* shouldn't happen */
14552
14553 if (desired_egress_rate >= current_egress_rate)
14554	return 0; /* we can't help it go faster, only slower */
14555
14556 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14557 egress_cycles(dw_len * 4, current_egress_rate);
14558
14559 return (u16)delta_cycles;
14560}
14561
Mike Marciniszyn77241052015-07-30 15:17:43 -040014562/**
14563 * create_pbc - build a pbc for transmission
14564 * @flags: special case flags or-ed in built pbc
14565 * @srate: static rate
14566 * @vl: vl
14567 * @dwlen: dword length (header words + data words + pbc words)
14568 *
14569 * Create a PBC with the given flags, rate, VL, and length.
14570 *
14571 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14572 * for verbs, which does not use this PSM feature. The lone other caller
14573 * is for the diagnostic interface which calls this if the user does not
14574 * supply their own PBC.
14575 */
14576u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14577 u32 dw_len)
14578{
14579 u64 pbc, delay = 0;
14580
14581 if (unlikely(srate_mbs))
14582 delay = delay_cycles(ppd, srate_mbs, dw_len);
14583
14584 pbc = flags
14585 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14586 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14587 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14588 | (dw_len & PBC_LENGTH_DWS_MASK)
14589 << PBC_LENGTH_DWS_SHIFT;
14590
14591 return pbc;
14592}
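/*
 * Illustrative use of create_pbc() with hypothetical values:
 * create_pbc(ppd, 0, 0, 0, 18) returns a PBC carrying an 18-dword length,
 * VL 0, HCRC insertion disabled, and no static-rate-control delay.
 */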
14593
14594#define SBUS_THERMAL 0x4f
14595#define SBUS_THERM_MONITOR_MODE 0x1
14596
14597#define THERM_FAILURE(dev, ret, reason) \
14598 dd_dev_err((dd), \
14599 "Thermal sensor initialization failed: %s (%d)\n", \
14600 (reason), (ret))
14601
14602/*
Jakub Pawlakcde10af2016-05-12 10:23:35 -070014603 * Initialize the thermal sensor.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014604 *
14605 * After initialization, enable polling of the thermal sensor through
14606 * the SBus interface. For this to work, the SBus Master firmware
14607 * has to be loaded because the HW polling logic uses SBus
14608 * interrupts, which are not supported with the default firmware.
14609 * Otherwise, no data will be returned through the ASIC_STS_THERM
14610 * CSR.
14611 */
14612static int thermal_init(struct hfi1_devdata *dd)
14613{
14614 int ret = 0;
14615
14616 if (dd->icode != ICODE_RTL_SILICON ||
Dean Luicka4536982016-03-05 08:50:11 -080014617 check_chip_resource(dd, CR_THERM_INIT, NULL))
Mike Marciniszyn77241052015-07-30 15:17:43 -040014618 return ret;
14619
Dean Luick576531f2016-03-05 08:50:01 -080014620 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14621 if (ret) {
14622 THERM_FAILURE(dd, ret, "Acquire SBus");
14623 return ret;
14624 }
14625
Mike Marciniszyn77241052015-07-30 15:17:43 -040014626 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014627 /* Disable polling of thermal readings */
14628 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14629 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014630 /* Thermal Sensor Initialization */
14631 /* Step 1: Reset the Thermal SBus Receiver */
14632 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14633 RESET_SBUS_RECEIVER, 0);
14634 if (ret) {
14635 THERM_FAILURE(dd, ret, "Bus Reset");
14636 goto done;
14637 }
14638 /* Step 2: Set Reset bit in Thermal block */
14639 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14640 WRITE_SBUS_RECEIVER, 0x1);
14641 if (ret) {
14642 THERM_FAILURE(dd, ret, "Therm Block Reset");
14643 goto done;
14644 }
14645 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14646 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14647 WRITE_SBUS_RECEIVER, 0x32);
14648 if (ret) {
14649 THERM_FAILURE(dd, ret, "Write Clock Div");
14650 goto done;
14651 }
14652 /* Step 4: Select temperature mode */
14653 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14654 WRITE_SBUS_RECEIVER,
14655 SBUS_THERM_MONITOR_MODE);
14656 if (ret) {
14657 THERM_FAILURE(dd, ret, "Write Mode Sel");
14658 goto done;
14659 }
14660 /* Step 5: De-assert block reset and start conversion */
14661 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14662 WRITE_SBUS_RECEIVER, 0x2);
14663 if (ret) {
14664 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14665 goto done;
14666 }
14667 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14668 msleep(22);
14669
14670 /* Enable polling of thermal readings */
14671 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
Dean Luicka4536982016-03-05 08:50:11 -080014672
14673 /* Set initialized flag */
14674 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14675 if (ret)
14676 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14677
Mike Marciniszyn77241052015-07-30 15:17:43 -040014678done:
Dean Luick576531f2016-03-05 08:50:01 -080014679 release_chip_resource(dd, CR_SBUS);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014680 return ret;
14681}
14682
14683static void handle_temp_err(struct hfi1_devdata *dd)
14684{
14685 struct hfi1_pportdata *ppd = &dd->pport[0];
14686 /*
14687 * Thermal Critical Interrupt
14688 * Put the device into forced freeze mode, take link down to
14689 * offline, and put DC into reset.
14690 */
14691 dd_dev_emerg(dd,
14692 "Critical temperature reached! Forcing device into freeze mode!\n");
14693 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080014694 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014695 /*
14696 * Shut DC down as much and as quickly as possible.
14697 *
14698 * Step 1: Take the link down to OFFLINE. This will cause the
14699 * 8051 to put the Serdes in reset. However, we don't want to
14700 * go through the entire link state machine since we want to
14701 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14702 * but rather an attempt to save the chip.
14703 * Code below is almost the same as quiet_serdes() but avoids
14704 * all the extra work and the sleeps.
14705 */
14706 ppd->driver_link_ready = 0;
14707 ppd->link_enabled = 0;
Harish Chegondibf640092016-03-05 08:49:29 -080014708 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14709 PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014710 /*
14711 * Step 2: Shutdown LCB and 8051
14712 * After shutdown, do not restore DC_CFG_RESET value.
14713 */
14714 dc_shutdown(dd);
14715}