/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
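/*
 * Editor's illustration (not part of the driver): a minimal sketch of the
 * arithmetic behind the 824ns default above.  The "example_" helper is
 * hypothetical; the 10K packet, 64-byte header, and 12.5 GB/s line rate
 * come straight from the comment.
 */
static inline u64 example_rcv_timeout_ns(void)
{
	u64 bytes = 10 * 1024 + 64;	/* one 10K packet plus header = 10304 */

	/* 10304 bytes / 12.5 GB/s = 824.32ns; integer math truncates to 824 */
	return (bytes * 10) / 125;
}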
uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */
struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
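/*
 * Editor's illustration: how a flag_table is typically consumed.  This is
 * a hypothetical sketch, not the driver's own flag-formatting routine; it
 * just walks a table and logs the description of every bit set in status.
 */
static inline void example_report_flags(struct hfi1_devdata *dd, u64 status,
					const struct flag_table *table,
					int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (status & table[i].flag)
			dd_dev_err(dd, "%s\n", table[i].str);
}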
/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define DEFAULT_KRCVQS		2
#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1
/*
 * RSM instance allocation
 *   0 - Verbs
 *   1 - User Fecn Handling
 *   2 - Vnic
 */
#define RSM_INS_VERBS 0
#define RSM_INS_FECN 1
#define RSM_INS_VNIC 2
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
/* QPN[7..1] */
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))
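/*
 * Editor's illustration: every RSM offset above packs a quadword index in
 * the bits at and above QW_SHIFT and a bit offset within that quadword
 * below it.  This hypothetical helper just makes the encoding explicit.
 */
static inline u64 example_rsm_offset(u64 qw, u64 bit)
{
	/* e.g. (1 << QW_SHIFT) | 1 is exactly QPN_SELECT_OFFSET */
	return (qw << QW_SHIFT) | bit;
}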
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
(	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)
#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
(	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT)   \
)
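/*
 * Editor's illustration: one possible invocation of SC2VL_VAL() above,
 * building a power-on value that maps SC0-SC7 to VL0-VL7 one-to-one.  The
 * identity mapping is illustrative, not necessarily what the driver
 * programs.
 */
static const u64 __maybe_unused example_sc2vlt0 =
	SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7);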
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		   | CCE_STATUS_RXE_FROZE_SMASK \
		   | CCE_STATUS_TXE_FROZE_SMASK \
		   | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		       | CCE_STATUS_TXE_PAUSED_SMASK \
		       | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
/*
 * CCE Error flags
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK)
};
/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};
/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
};
/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK)
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};
/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};
/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))
/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
};
/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};
/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};
/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};
/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};
/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
				    u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
					 int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
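/*
 * Editor's illustration: what one EE() entry expands to.  The CCE_ERR_*
 * CSR names and handle_cce_err() are real; the variable itself is a
 * hypothetical example, mirroring misc_errs[0] below.
 */
static const struct err_reg_info __maybe_unused example_cce_err_entry =
	EE(CCE_ERR, handle_cce_err, "CceErr");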
/*
 * Table of the "misc" grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};
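/*
 * Editor's illustration: the generic "clear down" sequence an err_reg_info
 * entry drives - read the status CSR, dispatch to the handler, then clear
 * exactly the bits that were seen.  This is a simplified hypothetical
 * sketch of the idea, not the driver's actual clear-down routine.
 */
static inline void example_clear_down(struct hfi1_devdata *dd, u32 source,
				      const struct err_reg_info *eri)
{
	u64 reg = read_csr(dd, eri->status);

	if (eri->handler)
		eri->handler(dd, source, reg);
	write_csr(dd, eri->clear, reg);	/* clear only the bits just seen */
}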
/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4
/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};
/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
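/*
 * Editor's illustration: for the power-of-2 MTUs the encoding is simply
 * log2(mtu) - 7 (256 -> 1 ... 8192 -> 6), which is why only 10240 needs
 * the constant above.  This helper is a hypothetical sketch, not the
 * driver's own MTU handling.
 */
static inline u32 example_dc_mtu_cap(u32 mtu)
{
	return (mtu == 10240) ? DCC_CFG_PORT_MTU_CAP_10240 : ilog2(mtu) - 7;
}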
/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};
struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;
	/*
	 * csr to read for name (if applicable)
	 */
	u32 csr;
	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;
	/*
	 * flags
	 */
	u8 flags;
	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};
#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)
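/*
 * Editor's illustration: the shape of a counter element built with the
 * helpers above.  The name and array index are hypothetical, not an entry
 * from the driver's real counter tables; the forward declaration lets the
 * example reference the accessor defined later in this file.
 */
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data);

static const struct cntr_entry __maybe_unused example_cntr =
	RXE32_DEV_CNTR_ELEM(ExampleRcvCnt, 0, CNTR_NORMAL);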
/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}
/**
 * read_csr - read CSR at the indicated offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no device
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}
/**
 * write_csr - write CSR at the indicated offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 * @value - value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}
/**
 * get_csr_addr - return the iomem address for offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}
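/*
 * Editor's illustration: a hedged usage sketch of the CSR accessors above.
 * The helper is hypothetical; it reads back whatever it wrote, relying on
 * read_csr() returning all FFs and write_csr() dropping the write when the
 * device is absent.
 */
static inline u64 example_csr_roundtrip(struct hfi1_devdata *dd, u32 offset)
{
	u64 val = read_csr(dd, offset);

	write_csr(dd, offset, val);	/* no-op if HFI1_PRESENT is clear */
	return val;
}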
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}
static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}
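/*
 * Note: LCB counter accesses go through read_lcb_csr()/write_lcb_csr(),
 * which can fail when the LCB CSRs are not currently accessible; in
 * that case the accessor logs an error and reports 0 for the sample
 * rather than stale data.
 */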
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}
/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

	return ret;
}
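/*
 * Illustrative only: a "write" of a software counter simply stores the
 * value, so zeroing the link-down count looks like:
 *
 *	read_write_sw(ppd->dd, &ppd->link_downed, CNTR_MODE_W, 0);
 */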
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}
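/*
 * A minimal sketch of the selection above: vl == CNTR_INVALID_VL picks
 * the port aggregate, 0..C_VL_COUNT-1 pick a per-VL counter, and any
 * other value reads back as 0 through the local `zero` variable.
 */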
static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}
u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
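/*
 * Illustrative only: per-CPU counters are never reset in place. A
 * "zeroed" view is the running total minus a saved snapshot:
 *
 *	u64 shown = get_all_cpu_total(cntr) - *z_val;
 */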
static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
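/*
 * Example (illustrative only): zeroing the interrupt counter snapshots
 * the current per-CPU total; any non-zero write is rejected:
 *
 *	read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *		       CNTR_INVALID_VL, CNTR_MODE_W, 0);
 */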
static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}
static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->ctx0_seq_drop;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}
/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}
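/*
 * These accessors exist so that each MISC_ERR_STATUS bit can appear as
 * its own entry in the counter tables; the index into
 * misc_err_status_cnt[] matches the bit position in the register.
 */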
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}
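/*
 * sw_cce_err_status_aggregate is accumulated by the CCE error handling
 * path elsewhere in this file; this accessor only reports it.
 */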
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[28];
}

static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[27];
}

static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[26];
}

static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[25];
}

static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[24];
}

static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[23];
}

static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[22];
}

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[63];
}

static u64 access_tx_read_sdma_memory_csr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[62];
}

static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[61];
}

static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[60];
}

static u64 access_tx_read_sdma_memory_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[59];
}

static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[58];
}

static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[57];
}

static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[56];
}

static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[55];
}

static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[54];
}

static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[53];
}

static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[52];
}

static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[51];
}

static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[50];
}

static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[49];
}

static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[48];
}

static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[47];
}

static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[46];
}

static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[45];
}

static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[44];
}

static u64 access_tx_read_sdma_memory_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[43];
}

static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[42];
}

static u64 access_tx_credit_return_partiy_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[41];
}

static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[40];
}

static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[39];
}

static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[38];
}

static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[37];
}

static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[36];
}

static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[35];
}

static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[34];
}

static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[33];
}

static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[32];
}

static u64 access_tx_sdma15_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[31];
}

static u64 access_tx_sdma14_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[30];
}

static u64 access_tx_sdma13_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[29];
}

static u64 access_tx_sdma12_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[28];
}

static u64 access_tx_sdma11_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[27];
}

static u64 access_tx_sdma10_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[26];
}

static u64 access_tx_sdma9_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[25];
}

static u64 access_tx_sdma8_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[24];
}

static u64 access_tx_sdma7_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl,
					       int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
				 void *context, int vl, int mode,
				 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	u64 val = 0;
	u64 csr = entry->csr;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}
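/*
 * Note on the read path above: the CSR value is combined with the
 * software-counted bypass packet errors and saturates at CNTR_MAX
 * rather than wrapping.  E.g. (hypothetical values) csr == CNTR_MAX - 5
 * with sw_rcv_bypass_packet_errors == 9 reports CNTR_MAX, not the
 * wrapped sum.  A write clears the software portion.
 */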
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,	      \
				void *context, int vl, int mode, u64 data)    \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
			      ppd->ibport_data.rvp.cntr, vl,		      \
			      mode, data);				      \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
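/*
 * For reference, def_access_sw_cpu(rc_acks) above expands (mechanically,
 * per the macro definition) to roughly:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd,
 *				      &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 *
 * i.e. each per-CPU counter is read relative to its zero baseline z_<cntr>.
 */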
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
			     void *context, int vl, int mode, u64 data)	      \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
									      \
	if (vl != CNTR_INVALID_VL)					      \
		return 0;						      \
									      \
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
			     mode, data);				      \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
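/*
 * Likewise, def_access_ibp_counter(loop_pkts) expands to an
 * access_ibp_loop_pkts() helper that returns 0 for any per-VL query
 * (these IB-port counters are not kept per VL) and otherwise forwards
 * to read_write_sw() on ppd->ibport_data.rvp.n_loop_pkts.
 */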
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
			access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
			CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
			CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
			CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
			CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
			CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_rcv_limit),
[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
			    access_sw_ctx0_seq_drop),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			    access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			    access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			    access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
			    SEND_DMA_DESC_FETCHED_CNT, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
				0, CNTR_NORMAL,
				access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
				CNTR_NORMAL,
				access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
				CNTR_NORMAL,
				access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
				0, CNTR_NORMAL,
				access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoDbgParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_rxdma_parity_err_cnt
				),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufFlInitWrAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQNumOfPktParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQTlPtrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
				0, 0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQHeadBufNumParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
				"RxRbufLookupDesRegUncCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
				0, 0, CNTR_NORMAL,
				access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
				CNTR_NORMAL,
				access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
				CNTR_NORMAL,
				access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
				"PioSbrdctrlCrrelFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
				0, CNTR_NORMAL,
				access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
				CNTR_NORMAL,
				access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
				"TxSbrdCtlStateMachineParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
				CNTR_NORMAL,
				access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
				CNTR_NORMAL,
				access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
				"EgressFifoUnderrunOrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
				0, 0, CNTR_NORMAL,
				access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
				CNTR_NORMAL,
				access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
				CNTR_NORMAL,
				access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_wrong_dw_err_cnt),
};
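/*
 * Per-port counters.  Entries use the same CNTR_ELEM layout as dev_cntrs
 * (name, csr, offset, flags, accessor).  The OVR_LBL()/OVR_ELM() pairs
 * at the end are assumed to expand to one per-receive-context header
 * overflow counter each (contexts 0-159).
 */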
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
			access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
			CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
			access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
			access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
			access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
5217 /* ======================================================================== */
/* return true if this is chip revision A */
5220 int is_ax(struct hfi1_devdata *dd)
5223 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5224 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5225 return (chip_rev_minor & 0xf0) == 0;
/* return true if this is chip revision B */
5229 int is_bx(struct hfi1_devdata *dd)
5232 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5233 & CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0x10;
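/*
 * Illustrative sketch (not used by the driver): the step is encoded in
 * the upper nibble of the minor revision, so A-step parts read 0x0n and
 * B-step parts read 0x1n. A hypothetical helper that names the step:
 */
static inline char example_chip_step(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/* 'A' for 0x0n, 'B' for 0x1n, and so on */
	return 'A' + ((chip_rev_minor >> 4) & 0xf);
}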
5238 * Append string s to buffer buf. Arguments curp and len are the current
5239 * position and remaining length, respectively.
5241 * return 0 on success, 1 on out of room
5243 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5247 int result = 0; /* success */
	/* add a comma, if not the first in the buffer */
5253 result = 1; /* out of room */
5260 /* copy the string */
5261 while ((c = *s++) != 0) {
5263 result = 1; /* out of room */
5271 /* write return values */
5279 * Using the given flag table, print a comma separated string into
5280 * the buffer. End in '*' if the buffer is too short.
5282 static char *flag_string(char *buf, int buf_len, u64 flags,
5283 struct flag_table *table, int table_size)
	/* make sure there are at least 2 bytes so we can form "*" */
	if (len < 2)
		return NULL;

	len--; /* leave room for a nul */
5296 for (i = 0; i < table_size; i++) {
5297 if (flags & table[i].flag) {
5298 no_room = append_str(buf, &p, &len, table[i].str);
5301 flags &= ~table[i].flag;
5305 /* any undocumented bits left? */
5306 if (!no_room && flags) {
5307 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5308 no_room = append_str(buf, &p, &len, extra);
	/* add * if ran out of room */
	if (no_room) {
		/* may need to back up to add space for a '*' */
		if (len == 0 && *(p - 1) == ',')
			p--;
		*p++ = '*';
	}

	/* add final nul - space already allocated above */
	*p = 0;
	return buf;
}
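/*
 * Usage sketch for flag_string(), with a hypothetical two-entry table
 * (a struct flag_table layout of { flag, str } is assumed): decoding
 * 0x5 yields "ErrA,bits 0x4", since bit 0 is named in the table and
 * bit 2 has no entry.
 */
static inline void example_flag_string(void)
{
	static struct flag_table example_flags[] = {
		{ .flag = 1ull << 0, .str = "ErrA" },
		{ .flag = 1ull << 1, .str = "ErrB" },
	};
	char buf[64];

	pr_info("decoded: %s\n",
		flag_string(buf, sizeof(buf), 0x5, example_flags,
			    ARRAY_SIZE(example_flags)));
}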
5324 /* first 8 CCE error interrupt source names */
5325 static const char * const cce_misc_names[] = {
5326 "CceErrInt", /* 0 */
5327 "RxeErrInt", /* 1 */
5328 "MiscErrInt", /* 2 */
5329 "Reserved3", /* 3 */
5330 "PioErrInt", /* 4 */
5331 "SDmaErrInt", /* 5 */
	"EgressErrInt",	/* 6 */
	"TxeErrInt"	/* 7 */
};
5337 * Return the miscellaneous error interrupt name.
5339 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5341 if (source < ARRAY_SIZE(cce_misc_names))
5342 strncpy(buf, cce_misc_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u",
			 source + IS_GENERAL_ERR_START);

	return buf;
}
5351 * Return the SDMA engine error interrupt name.
5353 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5355 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5360 * Return the send context error interrupt name.
5362 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5364 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5368 static const char * const various_names[] = {
5377 * Return the various interrupt name.
5379 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5381 if (source < ARRAY_SIZE(various_names))
5382 strncpy(buf, various_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);

	return buf;
}
5389 * Return the DC interrupt name.
5391 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
	static const char * const dc_int_names[] = {
		"common",
		"lcb",
		"8051",
		"lbm"	/* local block merge */
	};
5400 if (source < ARRAY_SIZE(dc_int_names))
5401 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
	else
		snprintf(buf, bsize, "DCInt%u", source);
	return buf;
}
static const char * const sdma_int_names[] = {
	"SDmaInt",
	"SDmaIdleInt",
	"SDmaProgressInt",
};
5414 * Return the SDMA engine interrupt name.
5416 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5418 /* what interrupt */
5419 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5421 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5423 if (likely(what < 3))
5424 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
	else
		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
	return buf;
}
5431 * Return the receive available interrupt name.
5433 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5435 snprintf(buf, bsize, "RcvAvailInt%u", source);
5440 * Return the receive urgent interrupt name.
5442 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5444 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5449 * Return the send credit interrupt name.
5451 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5453 snprintf(buf, bsize, "SendCreditInt%u", source);
5458 * Return the reserved interrupt name.
5460 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5462 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5466 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5468 return flag_string(buf, buf_len, flags,
5469 cce_err_status_flags,
5470 ARRAY_SIZE(cce_err_status_flags));
5473 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5475 return flag_string(buf, buf_len, flags,
5476 rxe_err_status_flags,
5477 ARRAY_SIZE(rxe_err_status_flags));
5480 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5482 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5483 ARRAY_SIZE(misc_err_status_flags));
5486 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5488 return flag_string(buf, buf_len, flags,
5489 pio_err_status_flags,
5490 ARRAY_SIZE(pio_err_status_flags));
5493 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5495 return flag_string(buf, buf_len, flags,
5496 sdma_err_status_flags,
5497 ARRAY_SIZE(sdma_err_status_flags));
5500 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5502 return flag_string(buf, buf_len, flags,
5503 egress_err_status_flags,
5504 ARRAY_SIZE(egress_err_status_flags));
5507 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5509 return flag_string(buf, buf_len, flags,
5510 egress_err_info_flags,
5511 ARRAY_SIZE(egress_err_info_flags));
5514 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5516 return flag_string(buf, buf_len, flags,
5517 send_err_status_flags,
5518 ARRAY_SIZE(send_err_status_flags));
5521 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
	 * For most of these errors, there is nothing that can be done except
5528 * report or record it.
5530 dd_dev_info(dd, "CCE Error: %s\n",
5531 cce_err_status_string(buf, sizeof(buf), reg));
5533 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5534 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5535 /* this error requires a manual drop into SPC freeze mode */
5537 start_freeze_handling(dd->pport, FREEZE_SELF);
5540 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5541 if (reg & (1ull << i)) {
5542 incr_cntr64(&dd->cce_err_status_cnt[i]);
5543 /* maintain a counter over all cce_err_status errors */
5544 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5550 * Check counters for receive errors that do not have an interrupt
5551 * associated with them.
#define RCVERR_CHECK_TIME 10 /* in seconds */
5554 static void update_rcverr_timer(struct timer_list *t)
5556 struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5557 struct hfi1_pportdata *ppd = dd->pport;
5558 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5560 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5561 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5562 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5563 set_link_down_reason(
5564 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5565 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5566 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5568 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5570 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5573 static int init_rcverr(struct hfi1_devdata *dd)
5575 timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5576 /* Assume the hardware counter has been reset */
5577 dd->rcv_ovfl_cnt = 0;
5578 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5581 static void free_rcverr(struct hfi1_devdata *dd)
5583 if (dd->rcverr_timer.function)
5584 del_timer_sync(&dd->rcverr_timer);
5587 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5592 dd_dev_info(dd, "Receive Error: %s\n",
5593 rxe_err_status_string(buf, sizeof(buf), reg));
5595 if (reg & ALL_RXE_FREEZE_ERR) {
5599 * Freeze mode recovery is disabled for the errors
5600 * in RXE_FREEZE_ABORT_MASK
5602 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5603 flags = FREEZE_ABORT;
5605 start_freeze_handling(dd->pport, flags);
5608 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5609 if (reg & (1ull << i))
5610 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5614 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
	dd_dev_info(dd, "Misc Error: %s\n",
5620 misc_err_status_string(buf, sizeof(buf), reg));
5621 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5622 if (reg & (1ull << i))
5623 incr_cntr64(&dd->misc_err_status_cnt[i]);
5627 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5632 dd_dev_info(dd, "PIO Error: %s\n",
5633 pio_err_status_string(buf, sizeof(buf), reg));
5635 if (reg & ALL_PIO_FREEZE_ERR)
5636 start_freeze_handling(dd->pport, 0);
5638 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5639 if (reg & (1ull << i))
5640 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5644 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5649 dd_dev_info(dd, "SDMA Error: %s\n",
5650 sdma_err_status_string(buf, sizeof(buf), reg));
5652 if (reg & ALL_SDMA_FREEZE_ERR)
5653 start_freeze_handling(dd->pport, 0);
5655 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5656 if (reg & (1ull << i))
5657 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5661 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5663 incr_cntr64(&ppd->port_xmit_discards);
5666 static void count_port_inactive(struct hfi1_devdata *dd)
5668 __count_port_discards(dd->pport);
5672 * We have had a "disallowed packet" error during egress. Determine the
5673 * integrity check which failed, and update relevant error counter, etc.
5675 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5676 * bit of state per integrity check, and so we can miss the reason for an
5677 * egress error if more than one packet fails the same integrity check
5678 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5680 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5683 struct hfi1_pportdata *ppd = dd->pport;
5684 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5685 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5688 /* clear down all observed info as quickly as possible after read */
5689 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5692 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5693 info, egress_err_info_string(buf, sizeof(buf), info), src);
5695 /* Eventually add other counters for each bit */
5696 if (info & PORT_DISCARD_EGRESS_ERRS) {
5700 * Count all applicable bits as individual errors and
5701 * attribute them to the packet that triggered this handler.
5702 * This may not be completely accurate due to limitations
5703 * on the available hardware error information. There is
5704 * a single information register and any number of error
5705 * packets may have occurred and contributed to it before
5706 * this routine is called. This means that:
5707 * a) If multiple packets with the same error occur before
5708 * this routine is called, earlier packets are missed.
5709 * There is only a single bit for each error type.
	 * b) Errors may not be attributed to the correct VL.
	 *    The driver is attributing all bits in the info register
	 *    to the packet that triggered this call, but bits
	 *    could be an accumulation of different packets with
	 *    different VLs.
	 * c) A single error packet may have multiple counts attached
	 *    to it. There is no way for the driver to know if
	 *    multiple bits set in the info register are due to a
	 *    single packet or multiple packets. The driver assumes
	 *    multiple packets.
	 */
5721 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5722 for (i = 0; i < weight; i++) {
5723 __count_port_discards(ppd);
5724 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5725 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
			else if (vl == 15)
				incr_cntr64(&ppd->port_xmit_discards_vl
					    [C_VL_COUNT]);
		}
	}
}
5734 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5735 * register. Does it represent a 'port inactive' error?
5737 static inline int port_inactive_err(u64 posn)
5739 return (posn >= SEES(TX_LINKDOWN) &&
5740 posn <= SEES(TX_INCORRECT_LINK_STATE));
5744 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5745 * register. Does it represent a 'disallowed packet' error?
5747 static inline int disallowed_pkt_err(int posn)
5749 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5750 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5754 * Input value is a bit position of one of the SDMA engine disallowed
5755 * packet errors. Return which engine. Use of this must be guarded by
5756 * disallowed_pkt_err().
5758 static inline int disallowed_pkt_engine(int posn)
5760 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
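/*
 * Worked example (sketch): the SDMA0..SDMA15 disallowed-packet bit
 * positions are contiguous, so a position equal to
 * SEES(TX_SDMA3_DISALLOWED_PACKET) passes disallowed_pkt_err() and
 * disallowed_pkt_engine() maps it to engine 3.
 */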
 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
 * be done.
5767 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5769 struct sdma_vl_map *m;
	int vl;

	/* range check */
	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
		return -1;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	vl = m->engine_to_vl[engine];
	rcu_read_unlock();

	return vl;
}
 * Translate the send context (software index) into a VL. Return -1 if the
5786 * translation cannot be done.
5788 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5790 struct send_context_info *sci;
	struct send_context *sc;
	int i;

	sci = &dd->send_contexts[sw_index];

	/* there is no information for user (PSM) and ack contexts */
	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
		return -1;

	sc = sci->sc;
	if (!sc)
		return -1;

	if (dd->vld[15].sc == sc)
		return 15;
	for (i = 0; i < num_vls; i++)
		if (dd->vld[i].sc == sc)
			return i;

	return -1;
}
5812 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5814 u64 reg_copy = reg, handled = 0;
5818 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5819 start_freeze_handling(dd->pport, 0);
5820 else if (is_ax(dd) &&
5821 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5822 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5823 start_freeze_handling(dd->pport, 0);
	while (reg_copy) {
		int posn = fls64(reg_copy);
5827 /* fls64() returns a 1-based offset, we want it zero based */
5828 int shift = posn - 1;
5829 u64 mask = 1ULL << shift;
5831 if (port_inactive_err(shift)) {
			count_port_inactive(dd);
			handled |= mask;
5834 } else if (disallowed_pkt_err(shift)) {
5835 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
			handle_send_egress_err_info(dd, vl);
			handled |= mask;
		}
		reg_copy &= ~mask;
	}

	reg &= ~handled;
	if (reg)
		dd_dev_info(dd, "Egress Error: %s\n",
5847 egress_err_status_string(buf, sizeof(buf), reg));
5849 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5850 if (reg & (1ull << i))
5851 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5855 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5860 dd_dev_info(dd, "Send Error: %s\n",
5861 send_err_status_string(buf, sizeof(buf), reg));
5863 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5864 if (reg & (1ull << i))
5865 incr_cntr64(&dd->send_err_status_cnt[i]);
5870 * The maximum number of times the error clear down will loop before
5871 * blocking a repeating error. This value is arbitrary.
5873 #define MAX_CLEAR_COUNT 20
5876 * Clear and handle an error register. All error interrupts are funneled
5877 * through here to have a central location to correctly handle single-
5878 * or multi-shot errors.
5880 * For non per-context registers, call this routine with a context value
5881 * of 0 so the per-context offset is zero.
5883 * If the handler loops too many times, assume that something is wrong
5884 * and can't be fixed, so mask the error bits.
5886 static void interrupt_clear_down(struct hfi1_devdata *dd,
5888 const struct err_reg_info *eri)
5893 /* read in a loop until no more errors are seen */
5896 reg = read_kctxt_csr(dd, context, eri->status);
5899 write_kctxt_csr(dd, context, eri->clear, reg);
5900 if (likely(eri->handler))
5901 eri->handler(dd, context, reg);
5903 if (count > MAX_CLEAR_COUNT) {
			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
				   eri->desc, reg);
			/*
			 * Read-modify-write so any other masked bits
			 * stay set.
			 */
			mask = read_kctxt_csr(dd, context, eri->mask);
			mask &= ~reg;
			write_kctxt_csr(dd, context, eri->mask, mask);
			break;
		}
	}
}
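/*
 * Sketch of a second-tier error table entry as consumed by
 * interrupt_clear_down(); the field layout (status/clear/mask CSR
 * offsets plus handler and description) is assumed. Grouping the CSR
 * offsets with the handler lets one loop service every error register
 * the same way.
 */
static const struct err_reg_info example_eri __maybe_unused = {
	.status  = CCE_ERR_STATUS,
	.clear   = CCE_ERR_CLEAR,
	.mask    = CCE_ERR_MASK,
	.handler = handle_cce_err,
	.desc    = "CceErr",
};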
5921 * CCE block "misc" interrupt. Source is < 16.
5923 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5925 const struct err_reg_info *eri = &misc_errs[source];
5928 interrupt_clear_down(dd, 0, eri);
5930 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5935 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5937 return flag_string(buf, buf_len, flags,
5938 sc_err_status_flags,
5939 ARRAY_SIZE(sc_err_status_flags));
5943 * Send context error interrupt. Source (hw_context) is < 160.
5945 * All send context errors cause the send context to halt. The normal
5946 * clear-down mechanism cannot be used because we cannot clear the
5947 * error bits until several other long-running items are done first.
5948 * This is OK because with the context halted, nothing else is going
5949 * to happen on it anyway.
5951 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5952 unsigned int hw_context)
5954 struct send_context_info *sci;
5955 struct send_context *sc;
5960 unsigned long irq_flags;
5962 sw_index = dd->hw_to_sw[hw_context];
5963 if (sw_index >= dd->num_send_contexts) {
5965 "out of range sw index %u for send context %u\n",
5966 sw_index, hw_context);
5969 sci = &dd->send_contexts[sw_index];
5970 spin_lock_irqsave(&dd->sc_lock, irq_flags);
5973 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5974 sw_index, hw_context);
5975 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5979 /* tell the software that a halt has begun */
5980 sc_stop(sc, SCF_HALTED);
5982 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5984 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5985 send_context_err_status_string(flags, sizeof(flags),
5988 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5989 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5992 * Automatically restart halted kernel contexts out of interrupt
5993 * context. User contexts must ask the driver to restart the context.
5995 if (sc->type != SC_USER)
5996 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5997 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
6000 * Update the counters for the corresponding status bits.
6001 * Note that these particular counters are aggregated over all
6004 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
6005 if (status & (1ull << i))
6006 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6010 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6011 unsigned int source, u64 status)
6013 struct sdma_engine *sde;
6016 sde = &dd->per_sdma[source];
6017 #ifdef CONFIG_SDMA_VERBOSITY
6018 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6019 slashstrip(__FILE__), __LINE__, __func__);
6020 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6021 sde->this_idx, source, (unsigned long long)status);
6024 sdma_engine_error(sde, status);
6027 * Update the counters for the corresponding status bits.
6028 * Note that these particular counters are aggregated over
6029 * all 16 DMA engines.
6031 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6032 if (status & (1ull << i))
6033 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6038 * CCE block SDMA error interrupt. Source is < 16.
6040 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6042 #ifdef CONFIG_SDMA_VERBOSITY
6043 struct sdma_engine *sde = &dd->per_sdma[source];
6045 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6046 slashstrip(__FILE__), __LINE__, __func__);
6047 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6049 sdma_dumpstate(sde);
6051 interrupt_clear_down(dd, source, &sdma_eng_err);
6055 * CCE block "various" interrupt. Source is < 8.
6057 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6059 const struct err_reg_info *eri = &various_err[source];
6062 * TCritInt cannot go through interrupt_clear_down()
6063 * because it is not a second tier interrupt. The handler
6064 * should be called directly.
6066 if (source == TCRIT_INT_SOURCE)
6067 handle_temp_err(dd);
6068 else if (eri->handler)
6069 interrupt_clear_down(dd, 0, eri);
6072 "%s: Unimplemented/reserved interrupt %d\n",
6076 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6078 /* src_ctx is always zero */
6079 struct hfi1_pportdata *ppd = dd->pport;
6080 unsigned long flags;
6081 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6083 if (reg & QSFP_HFI0_MODPRST_N) {
6084 if (!qsfp_mod_present(ppd)) {
6085 dd_dev_info(dd, "%s: QSFP module removed\n",
6088 ppd->driver_link_ready = 0;
6090 * Cable removed, reset all our information about the
6091 * cache and cable capabilities
6094 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6096 * We don't set cache_refresh_required here as we expect
6097 * an interrupt when a cable is inserted
6099 ppd->qsfp_info.cache_valid = 0;
6100 ppd->qsfp_info.reset_needed = 0;
6101 ppd->qsfp_info.limiting_active = 0;
6102 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6104 /* Invert the ModPresent pin now to detect plug-in */
6105 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6106 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
			if ((ppd->offline_disabled_reason >
			    HFI1_ODR_MASK(
			    OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
			    (ppd->offline_disabled_reason ==
			    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
				ppd->offline_disabled_reason =
					HFI1_ODR_MASK(
					OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6117 if (ppd->host_link_state == HLS_DN_POLL) {
6119 * The link is still in POLL. This means
6120 * that the normal link down processing
6121 * will not happen. We have to do it here
6122 * before turning the DC off.
6124 queue_work(ppd->link_wq, &ppd->link_down_work);
6127 dd_dev_info(dd, "%s: QSFP module inserted\n",
6130 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6131 ppd->qsfp_info.cache_valid = 0;
6132 ppd->qsfp_info.cache_refresh_required = 1;
6133 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6137 * Stop inversion of ModPresent pin to detect
6138 * removal of the cable
6140 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6141 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6142 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6144 ppd->offline_disabled_reason =
6145 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6149 if (reg & QSFP_HFI0_INT_N) {
6150 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6152 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6153 ppd->qsfp_info.check_interrupt_flags = 1;
6154 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6157 /* Schedule the QSFP work only if there is a cable attached. */
6158 if (qsfp_mod_present(ppd))
6159 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6162 static int request_host_lcb_access(struct hfi1_devdata *dd)
6166 ret = do_8051_command(dd, HCMD_MISC,
6167 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6168 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6169 if (ret != HCMD_SUCCESS) {
6170 dd_dev_err(dd, "%s: command failed with error %d\n",
6173 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6176 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6180 ret = do_8051_command(dd, HCMD_MISC,
6181 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6182 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6183 if (ret != HCMD_SUCCESS) {
6184 dd_dev_err(dd, "%s: command failed with error %d\n",
6187 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6191 * Set the LCB selector - allow host access. The DCC selector always
6192 * points to the host.
6194 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6196 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6197 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6198 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6202 * Clear the LCB selector - allow 8051 access. The DCC selector always
6203 * points to the host.
6205 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6207 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6208 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6212 * Acquire LCB access from the 8051. If the host already has access,
6213 * just increment a counter. Otherwise, inform the 8051 that the
6214 * host is taking access.
6218 * -EBUSY if the 8051 has control and cannot be disturbed
6219 * -errno if unable to acquire access from the 8051
6221 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6223 struct hfi1_pportdata *ppd = dd->pport;
6227 * Use the host link state lock so the operation of this routine
6228 * { link state check, selector change, count increment } can occur
6229 * as a unit against a link state change. Otherwise there is a
6230 * race between the state change and the count increment.
	if (sleep_ok) {
		mutex_lock(&ppd->hls_lock);
	} else {
		while (!mutex_trylock(&ppd->hls_lock))
			udelay(1);
	}
6239 /* this access is valid only when the link is up */
6240 if (ppd->host_link_state & HLS_DOWN) {
6241 dd_dev_info(dd, "%s: link state %s not up\n",
6242 __func__, link_state_name(ppd->host_link_state));
6247 if (dd->lcb_access_count == 0) {
6248 ret = request_host_lcb_access(dd);
6251 "%s: unable to acquire LCB access, err %d\n",
6255 set_host_lcb_access(dd);
6257 dd->lcb_access_count++;
6259 mutex_unlock(&ppd->hls_lock);
6264 * Release LCB access by decrementing the use count. If the count is moving
6265 * from 1 to 0, inform 8051 that it has control back.
6269 * -errno if unable to release access to the 8051
6271 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6276 * Use the host link state lock because the acquire needed it.
6277 * Here, we only need to keep { selector change, count decrement }
	if (sleep_ok) {
		mutex_lock(&dd->pport->hls_lock);
	} else {
		while (!mutex_trylock(&dd->pport->hls_lock))
			udelay(1);
	}
6287 if (dd->lcb_access_count == 0) {
6288 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6293 if (dd->lcb_access_count == 1) {
6294 set_8051_lcb_access(dd);
6295 ret = request_8051_lcb_access(dd);
6298 "%s: unable to release LCB access, err %d\n",
6300 /* restore host access if the grant didn't work */
6301 set_host_lcb_access(dd);
6305 dd->lcb_access_count--;
6307 mutex_unlock(&dd->pport->hls_lock);
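/*
 * Usage sketch (illustrative only): direct LCB CSR reads are bracketed
 * by acquire/release so the 8051 firmware is never disturbed
 * mid-access. sleep_ok = 1 requires a sleepable context.
 */
static inline u64 example_read_lcb_rtt(struct hfi1_devdata *dd)
{
	u64 rtt = 0;

	if (acquire_lcb_access(dd, 1) == 0) {
		rtt = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
		release_lcb_access(dd, 1);
	}
	return rtt;
}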
6312 * Initialize LCB access variables and state. Called during driver load,
6313 * after most of the initialization is finished.
6315 * The DC default is LCB access on for the host. The driver defaults to
6316 * leaving access to the 8051. Assign access now - this constrains the call
6317 * to this routine to be after all LCB set-up is done. In particular, after
 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6320 static void init_lcb_access(struct hfi1_devdata *dd)
6322 dd->lcb_access_count = 0;
 * Write a response back to an 8051 request.
6328 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6330 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6331 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6333 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6334 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
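/*
 * Worked example (sketch): hreq_response(dd, HREQ_SUCCESS, 0x1234)
 * writes COMPLETED | (HREQ_SUCCESS << RETURN_CODE_SHIFT) |
 * (0x1234 << RSP_DATA_SHIFT) in one CSR update, so the 8051 sees the
 * completion flag, return code, and response data atomically.
 */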
6338 * Handle host requests from the 8051.
6340 static void handle_8051_request(struct hfi1_pportdata *ppd)
6342 struct hfi1_devdata *dd = ppd->dd;
6347 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6348 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6349 return; /* no request */
6351 /* zero out COMPLETED so the response is seen */
6352 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6354 /* extract request details */
6355 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6356 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6357 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6358 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6361 case HREQ_LOAD_CONFIG:
6362 case HREQ_SAVE_CONFIG:
6363 case HREQ_READ_CONFIG:
6364 case HREQ_SET_TX_EQ_ABS:
6365 case HREQ_SET_TX_EQ_REL:
6367 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6369 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6371 case HREQ_LCB_RESET:
6372 /* Put the LCB, RX FPE and TX FPE into reset */
6373 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6374 /* Make sure the write completed */
6375 (void)read_csr(dd, DCC_CFG_RESET);
		/* Hold the reset long enough to take effect */
		udelay(1);
6378 /* Take the LCB, RX FPE and TX FPE out of reset */
6379 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6380 hreq_response(dd, HREQ_SUCCESS, 0);
6383 case HREQ_CONFIG_DONE:
6384 hreq_response(dd, HREQ_SUCCESS, 0);
6387 case HREQ_INTERFACE_TEST:
6388 hreq_response(dd, HREQ_SUCCESS, data);
6391 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6392 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
 * Set up the allocation unit value.
6400 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6402 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6404 /* do not modify other values in the register */
6405 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6406 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6407 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6411 * Set up initial VL15 credits of the remote. Assumes the rest of
6412 * the CM credit registers are zero from a previous global or credit reset.
6413 * Shared limit for VL15 will always be 0.
6415 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6417 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6419 /* set initial values for total and shared credit limit */
6420 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6421 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6424 * Set total limit to be equal to VL15 credits.
6425 * Leave shared limit at 0.
6427 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6428 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6430 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6431 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6435 * Zero all credit details from the previous connection and
6436 * reset the CM manager's internal counters.
6438 void reset_link_credits(struct hfi1_devdata *dd)
6442 /* remove all previous VL credit limits */
6443 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6444 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6445 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6446 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6447 /* reset the CM block */
6448 pio_send_control(dd, PSC_CM_RESET);
6449 /* reset cached value */
6450 dd->vl15buf_cached = 0;
6453 /* convert a vCU to a CU */
static u32 vcu_to_cu(u8 vcu)
{
	return 1 << vcu;
}

/* convert a CU to a vCU */
static u8 cu_to_vcu(u32 cu)
{
	return ilog2(cu);
}
6465 /* convert a vAU to an AU */
6466 static u32 vau_to_au(u8 vau)
6468 return 8 * (1 << vau);
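/*
 * Worked examples (sketch): vAU = 3 gives an allocation unit of
 * 8 * (1 << 3) = 64 bytes; vCU = 1 gives a credit unit of
 * (1 << 1) = 2 credits.
 */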
6471 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6473 ppd->sm_trap_qp = 0x0;
6478 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6480 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6484 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6485 write_csr(dd, DC_LCB_CFG_RUN, 0);
6486 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6487 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6488 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6489 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6490 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6491 reg = read_csr(dd, DCC_CFG_RESET);
6492 write_csr(dd, DCC_CFG_RESET, reg |
6493 DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6494 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
	if (!abort) {
		udelay(1); /* must hold for the longer of 16cclks or 20ns */
		write_csr(dd, DCC_CFG_RESET, reg);
		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	}
}
6503 * This routine should be called after the link has been transitioned to
6504 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6507 * The expectation is that the caller of this routine would have taken
6508 * care of properly transitioning the link into the correct state.
6509 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6510 * before calling this function.
6512 static void _dc_shutdown(struct hfi1_devdata *dd)
6514 lockdep_assert_held(&dd->dc8051_lock);
6516 if (dd->dc_shutdown)
6519 dd->dc_shutdown = 1;
6520 /* Shutdown the LCB */
6521 lcb_shutdown(dd, 1);
	 * Going to OFFLINE would have caused the 8051 to put the
6524 * SerDes into reset already. Just need to shut down the 8051,
6527 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6530 static void dc_shutdown(struct hfi1_devdata *dd)
6532 mutex_lock(&dd->dc8051_lock);
6534 mutex_unlock(&dd->dc8051_lock);
 * Calling this after the DC has been brought out of reset should not
 * do any damage.
6540 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6541 * before calling this function.
6543 static void _dc_start(struct hfi1_devdata *dd)
6545 lockdep_assert_held(&dd->dc8051_lock);
6547 if (!dd->dc_shutdown)
6550 /* Take the 8051 out of reset */
6551 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6552 /* Wait until 8051 is ready */
6553 if (wait_fm_ready(dd, TIMEOUT_8051_START))
6554 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6557 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6558 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6559 /* lcb_shutdown() with abort=1 does not restore these */
6560 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6561 dd->dc_shutdown = 0;
6564 static void dc_start(struct hfi1_devdata *dd)
6566 mutex_lock(&dd->dc8051_lock);
6568 mutex_unlock(&dd->dc8051_lock);
6572 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6574 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6576 u64 rx_radr, tx_radr;
	if (dd->icode != ICODE_FPGA_EMULATION)
		return;
6583 * These LCB defaults on emulator _s are good, nothing to do here:
6584 * LCB_CFG_TX_FIFOS_RADR
6585 * LCB_CFG_RX_FIFOS_RADR
6587 * LCB_CFG_IGNORE_LOST_RCLK
	if (is_emulator_s(dd))
		return;
6591 /* else this is _p */
	version = emulator_rev(dd);
	if (!is_ax(dd))
		version = 0x2d; /* all B0 use 0x2d or higher settings */
6597 if (version <= 0x12) {
6598 /* release 0x12 and below */
6601 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6602 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6603 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6606 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6607 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6608 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6610 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6611 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6613 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6614 } else if (version <= 0x18) {
6615 /* release 0x13 up to 0x18 */
6616 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6618 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6619 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6620 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6621 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6622 } else if (version == 0x19) {
6624 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6626 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6627 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6628 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6629 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6630 } else if (version == 0x1a) {
6632 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6634 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6635 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6636 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6637 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6638 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6640 /* release 0x1b and higher */
6641 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6643 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6644 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6645 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6646 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6649 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6650 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6651 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6652 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6653 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6657 * Handle a SMA idle message
6659 * This is a work-queue function outside of the interrupt.
6661 void handle_sma_message(struct work_struct *work)
6663 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6665 struct hfi1_devdata *dd = ppd->dd;
	 * msg is bytes 1-4 of the 40-bit idle message - the command code
	 * is stripped off
	 */
6673 ret = read_idle_sma(dd, &msg);
6676 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6678 * React to the SMA message. Byte[1] (0 for us) is the command.
	switch (msg & 0xff) {
	case SMA_IDLE_ARM:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
6686 * Only expected in INIT or ARMED, discard otherwise.
6688 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6689 ppd->neighbor_normal = 1;
6691 case SMA_IDLE_ACTIVE:
6693 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6696 * Can activate the node. Discard otherwise.
6698 if (ppd->host_link_state == HLS_UP_ARMED &&
6699 ppd->is_active_optimize_enabled) {
6700 ppd->neighbor_normal = 1;
6701 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6705 "%s: received Active SMA idle message, couldn't set link to Active\n",
6711 "%s: received unexpected SMA idle message 0x%llx\n",
6717 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6720 unsigned long flags;
6722 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6723 rcvctrl = read_csr(dd, RCV_CTRL);
6726 write_csr(dd, RCV_CTRL, rcvctrl);
6727 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6730 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6732 adjust_rcvctrl(dd, add, 0);
6735 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6737 adjust_rcvctrl(dd, 0, clear);
6741 * Called from all interrupt handlers to start handling an SPC freeze.
6743 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6745 struct hfi1_devdata *dd = ppd->dd;
6746 struct send_context *sc;
6750 if (flags & FREEZE_SELF)
6751 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6753 /* enter frozen mode */
6754 dd->flags |= HFI1_FROZEN;
6756 /* notify all SDMA engines that they are going into a freeze */
6757 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
	sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
					      SCF_LINK_DOWN : 0);
6761 /* do halt pre-handling on all enabled send contexts */
6762 for (i = 0; i < dd->num_send_contexts; i++) {
6763 sc = dd->send_contexts[i].sc;
6764 if (sc && (sc->flags & SCF_ENABLED))
6765 sc_stop(sc, sc_flags);
	/* Send contexts are frozen. Notify user space */
6769 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6771 if (flags & FREEZE_ABORT) {
6773 "Aborted freeze recovery. Please REBOOT system\n");
6776 /* queue non-interrupt handler */
6777 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6781 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6782 * depending on the "freeze" parameter.
6784 * No need to return an error if it times out, our only option
6785 * is to proceed anyway.
6787 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6789 unsigned long timeout;
6792 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6794 reg = read_csr(dd, CCE_STATUS);
6796 /* waiting until all indicators are set */
6797 if ((reg & ALL_FROZE) == ALL_FROZE)
6798 return; /* all done */
6800 /* waiting until all indicators are clear */
6801 if ((reg & ALL_FROZE) == 0)
6802 return; /* all done */
6805 if (time_after(jiffies, timeout)) {
6807 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6808 freeze ? "" : "un", reg & ALL_FROZE,
6809 freeze ? ALL_FROZE : 0ull);
6812 usleep_range(80, 120);
6817 * Do all freeze handling for the RXE block.
6819 static void rxe_freeze(struct hfi1_devdata *dd)
6822 struct hfi1_ctxtdata *rcd;
6825 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6827 /* disable all receive contexts */
6828 for (i = 0; i < dd->num_rcv_contexts; i++) {
6829 rcd = hfi1_rcd_get_by_index(dd, i);
6830 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6836 * Unfreeze handling for the RXE block - kernel contexts only.
6837 * This will also enable the port. User contexts will do unfreeze
6838 * handling on a per-context basis as they call into the driver.
6841 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6845 struct hfi1_ctxtdata *rcd;
6847 /* enable all kernel contexts */
6848 for (i = 0; i < dd->num_rcv_contexts; i++) {
6849 rcd = hfi1_rcd_get_by_index(dd, i);
		/* Ensure all non-user contexts (including vnic) are enabled */
6853 (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6857 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6858 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6859 rcvmask |= rcd->rcvhdrtail_kvaddr ?
6860 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6861 hfi1_rcvctrl(dd, rcvmask, rcd);
6866 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6870 * Non-interrupt SPC freeze handling.
6872 * This is a work-queue function outside of the triggering interrupt.
6874 void handle_freeze(struct work_struct *work)
6876 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6878 struct hfi1_devdata *dd = ppd->dd;
6880 /* wait for freeze indicators on all affected blocks */
6881 wait_for_freeze_status(dd, 1);
6883 /* SPC is now frozen */
	/* do send PIO freeze steps */
	pio_freeze(dd);

	/* do send DMA freeze steps */
	sdma_freeze(dd);

	/* do send egress freeze steps - nothing to do */

	/* do receive freeze steps */
	rxe_freeze(dd);
6897 * Unfreeze the hardware - clear the freeze, wait for each
6898 * block's frozen bit to clear, then clear the frozen flag.
6900 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
	wait_for_freeze_status(dd, 0);

	/* A-step parts get an extra freeze/unfreeze cycle */
	if (is_ax(dd)) {
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
		wait_for_freeze_status(dd, 1);
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
		wait_for_freeze_status(dd, 0);
	}
6910 /* do send PIO unfreeze steps for kernel contexts */
6911 pio_kernel_unfreeze(dd);
6913 /* do send DMA unfreeze steps */
6916 /* do send egress unfreeze steps - nothing to do */
6918 /* do receive unfreeze steps for kernel contexts */
6919 rxe_kernel_unfreeze(dd);
6922 * The unfreeze procedure touches global device registers when
6923 * it disables and re-enables RXE. Mark the device unfrozen
6924 * after all that is done so other parts of the driver waiting
6925 * for the device to unfreeze don't do things out of order.
6927 * The above implies that the meaning of HFI1_FROZEN flag is
6928 * "Device has gone into freeze mode and freeze mode handling
6929 * is still in progress."
6931 * The flag will be removed when freeze mode processing has
6934 dd->flags &= ~HFI1_FROZEN;
6935 wake_up(&dd->event_queue);
6937 /* no longer frozen */
6941 * update_xmit_counters - update PortXmitWait/PortVlXmitWait
 * @ppd: info of physical HFI port
6944 * @link_width: new link width after link up or downgrade
6946 * Update the PortXmitWait and PortVlXmitWait counters after
6947 * a link up or downgrade event to reflect a link width change.
6949 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6955 tx_width = tx_link_width(link_width);
6956 link_speed = get_link_speed(ppd->link_speed_active);
6959 * There are C_VL_COUNT number of PortVLXmitWait counters.
6960 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
6962 for (i = 0; i < C_VL_COUNT + 1; i++)
6963 get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6967 * Handle a link up interrupt from the 8051.
6969 * This is a work-queue function outside of the interrupt.
6971 void handle_link_up(struct work_struct *work)
6973 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6975 struct hfi1_devdata *dd = ppd->dd;
6977 set_link_state(ppd, HLS_UP_INIT);
6979 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6982 * OPA specifies that certain counters are cleared on a transition
6983 * to link up, so do that.
6985 clear_linkup_counters(dd);
6987 * And (re)set link up default values.
6989 set_linkup_defaults(ppd);
6992 * Set VL15 credits. Use cached value from verify cap interrupt.
6993 * In case of quick linkup or simulator, vl15 value will be set by
6994 * handle_linkup_change. VerifyCap interrupt handler will not be
6995 * called in those scenarios.
6997 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6998 set_up_vl15(dd, dd->vl15buf_cached);
7000 /* enforce link speed enabled */
7001 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
7002 /* oops - current speed is not enabled, bounce */
7004 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
7005 ppd->link_speed_active, ppd->link_speed_enabled);
7006 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
7007 OPA_LINKDOWN_REASON_SPEED_POLICY);
7008 set_link_state(ppd, HLS_DN_OFFLINE);
7014 * Several pieces of LNI information were cached for SMA in ppd.
7015 * Reset these on link down
7017 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7019 ppd->neighbor_guid = 0;
7020 ppd->neighbor_port_number = 0;
7021 ppd->neighbor_type = 0;
7022 ppd->neighbor_fm_security = 0;
7025 static const char * const link_down_reason_strs[] = {
7026 [OPA_LINKDOWN_REASON_NONE] = "None",
7027 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7028 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7029 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7030 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7031 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7032 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7033 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7034 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7035 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7036 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7037 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7038 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7039 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7040 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7041 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7042 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7043 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7044 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7045 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7046 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7047 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7048 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7049 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7050 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7051 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7052 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7053 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7054 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7055 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7056 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7057 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7058 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7059 "Excessive buffer overrun",
7060 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7061 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7062 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7063 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7064 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7065 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7066 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7067 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7068 "Local media not installed",
7069 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7070 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7071 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7072 "End to end not installed",
7073 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7074 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7075 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7076 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7077 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7078 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7081 /* return the neighbor link down reason string */
7082 static const char *link_down_reason_str(u8 reason)
7084 const char *str = NULL;
7086 if (reason < ARRAY_SIZE(link_down_reason_strs))
7087 str = link_down_reason_strs[reason];
7095 * Handle a link down interrupt from the 8051.
7097 * This is a work-queue function outside of the interrupt.
7099 void handle_link_down(struct work_struct *work)
7101 u8 lcl_reason, neigh_reason = 0;
7102 u8 link_down_reason;
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_down_work);
	int was_up;
	static const char ldr_str[] = "Link down reason: ";
7108 if ((ppd->host_link_state &
7109 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7110 ppd->port_type == PORT_TYPE_FIXED)
7111 ppd->offline_disabled_reason =
7112 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7114 /* Go offline first, then deal with reading/writing through 8051 */
7115 was_up = !!(ppd->host_link_state & HLS_UP);
7116 set_link_state(ppd, HLS_DN_OFFLINE);
7117 xchg(&ppd->is_link_down_queued, 0);
7121 /* link down reason is only valid if the link was up */
7122 read_link_down_reason(ppd->dd, &link_down_reason);
7123 switch (link_down_reason) {
7124 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7125 /* the link went down, no idle message reason */
7126 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7129 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7131 * The neighbor reason is only valid if an idle message
7132 * was received for it.
7134 read_planned_down_reason_code(ppd->dd, &neigh_reason);
7135 dd_dev_info(ppd->dd,
7136 "%sNeighbor link down message %d, %s\n",
7137 ldr_str, neigh_reason,
7138 link_down_reason_str(neigh_reason));
7140 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7141 dd_dev_info(ppd->dd,
7142 "%sHost requested link to go offline\n",
7146 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7147 ldr_str, link_down_reason);
7152 * If no reason, assume peer-initiated but missed
7153 * LinkGoingDown idle flits.
7155 if (neigh_reason == 0)
7156 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7158 /* went down while polling or going up */
7159 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7162 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7164 /* inform the SMA when the link transitions from up to down */
7165 if (was_up && ppd->local_link_down_reason.sma == 0 &&
7166 ppd->neigh_link_down_reason.sma == 0) {
7167 ppd->local_link_down_reason.sma =
7168 ppd->local_link_down_reason.latest;
7169 ppd->neigh_link_down_reason.sma =
7170 ppd->neigh_link_down_reason.latest;
7173 reset_neighbor_info(ppd);
7175 /* disable the port */
7176 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7179 * If there is no cable attached, turn the DC off. Otherwise,
7180 * start the link bring up.
7182 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7183 dc_shutdown(ppd->dd);
7188 void handle_link_bounce(struct work_struct *work)
7190 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7194 * Only do something if the link is currently up.
7196 if (ppd->host_link_state & HLS_UP) {
7197 set_link_state(ppd, HLS_DN_OFFLINE);
7200 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7201 __func__, link_state_name(ppd->host_link_state));
7206 * Mask conversion: Capability exchange to Port LTP. The capability
7207 * exchange has an implicit 16b CRC that is mandatory.
7209 static int cap_to_port_ltp(int cap)
7211 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7213 if (cap & CAP_CRC_14B)
7214 port_ltp |= PORT_LTP_CRC_MODE_14;
7215 if (cap & CAP_CRC_48B)
7216 port_ltp |= PORT_LTP_CRC_MODE_48;
7217 if (cap & CAP_CRC_12B_16B_PER_LANE)
7218 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7224 * Convert an OPA Port LTP mask to capability mask
7226 int port_ltp_to_cap(int port_ltp)
7230 if (port_ltp & PORT_LTP_CRC_MODE_14)
7231 cap_mask |= CAP_CRC_14B;
7232 if (port_ltp & PORT_LTP_CRC_MODE_48)
7233 cap_mask |= CAP_CRC_48B;
7234 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7235 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7241 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7243 static int lcb_to_port_ltp(int lcb_crc)
7247 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7248 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7249 else if (lcb_crc == LCB_CRC_48B)
7250 port_ltp = PORT_LTP_CRC_MODE_48;
7251 else if (lcb_crc == LCB_CRC_14B)
7252 port_ltp = PORT_LTP_CRC_MODE_14;
7254 port_ltp = PORT_LTP_CRC_MODE_16;
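/*
 * Worked example (sketch): a capability mask of CAP_CRC_14B |
 * CAP_CRC_48B converts to PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 |
 * PORT_LTP_CRC_MODE_48; the 16b mode is always present because the
 * capability exchange's implicit 16b CRC is mandatory.
 * port_ltp_to_cap() inverts only the optional modes.
 */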
7259 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7261 if (ppd->pkeys[2] != 0) {
7263 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7264 hfi1_event_pkey_change(ppd->dd, ppd->port);
7269 * Convert the given link width to the OPA link width bitmask.
static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
{
	switch (width) {
	case 0:
7276 * Simulator and quick linkup do not set the width.
7277 * Just set it to 4x without complaint.
7279 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7280 return OPA_LINK_WIDTH_4X;
7281 return 0; /* no lanes up */
7282 case 1: return OPA_LINK_WIDTH_1X;
7283 case 2: return OPA_LINK_WIDTH_2X;
7284 case 3: return OPA_LINK_WIDTH_3X;
7286 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7289 case 4: return OPA_LINK_WIDTH_4X;
7294 * Do a population count on the bottom nibble.
7296 static const u8 bit_counts[16] = {
7297 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7300 static inline u8 nibble_to_count(u8 nibble)
7302 return bit_counts[nibble & 0xf];
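/*
 * Example: an enabled-lane nibble of 0xb (binary 1011, lanes 0, 1 and 3)
 * yields a count of 3, i.e. a 3x link width.
 */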
7306 * Read the active lane information from the 8051 registers and return
7309 * Active lane information is found in these 8051 registers:
7313 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7319 u8 tx_polarity_inversion;
7320 u8 rx_polarity_inversion;
7323 /* read the active lanes */
7324 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7325 &rx_polarity_inversion, &max_rate);
7326 read_local_lni(dd, &enable_lane_rx);
7328 /* convert to counts */
7329 tx = nibble_to_count(enable_lane_tx);
7330 rx = nibble_to_count(enable_lane_rx);
7333 * Set link_speed_active here, overriding what was set in
7334 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7335 * set the max_rate field in handle_verify_cap until v0.19.
7337 if ((dd->icode == ICODE_RTL_SILICON) &&
7338 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7339 /* max_rate: 0 = 12.5G, 1 = 25G */
7342 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7346 "%s: unexpected max rate %d, using 25Gb\n",
7347 __func__, (int)max_rate);
7350 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7356 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7357 enable_lane_tx, tx, enable_lane_rx, rx);
7358 *tx_width = link_width_to_bits(dd, tx);
7359 *rx_width = link_width_to_bits(dd, rx);
7363 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7364 * Valid after the end of VerifyCap and during LinkUp. Does not change
7365 * after link up. I.e. look elsewhere for downgrade information.
7368 * + bits [7:4] contain the number of active transmitters
7369 * + bits [3:0] contain the number of active receivers
7370 * These are numbers 1 through 4 and can be different values if the
7371 * link is asymmetric.
7373 * verify_cap_local_fm_link_width[0] retains its original value.
7375 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7379 u8 misc_bits, local_flags;
7380 u16 active_tx, active_rx;
7382 read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7384 rx = (widths >> 8) & 0xf;
7386 *tx_width = link_width_to_bits(dd, tx);
7387 *rx_width = link_width_to_bits(dd, rx);
7389 /* print the active widths */
7390 get_link_widths(dd, &active_tx, &active_rx);
7394 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7395 * hardware information when the link first comes up.
7397 * The link width is not available until after VerifyCap.AllFramesReceived
7398 * (the trigger for handle_verify_cap), so this is outside that routine
7399 * and should be called when the 8051 signals linkup.
7401 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7403 u16 tx_width, rx_width;
7405 /* get end-of-LNI link widths */
7406 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7408 /* use tx_width as the link is supposed to be symmetric on link up */
7409 ppd->link_width_active = tx_width;
7410 /* link width downgrade active (LWD.A) starts out matching LW.A */
7411 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7412 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7413 /* per OPA spec, on link up LWD.E resets to LWD.S */
7414 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7415 /* cache the active egress rate (units [10^6 bits/sec]) */
7416 ppd->current_egress_rate = active_egress_rate(ppd);
7420 * Handle a verify capabilities interrupt from the 8051.
7422 * This is a work-queue function outside of the interrupt.
7424 void handle_verify_cap(struct work_struct *work)
7426 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7428 struct hfi1_devdata *dd = ppd->dd;
7430 u8 power_management;
7440 u16 active_tx, active_rx;
7441 u8 partner_supported_crc;
7445 set_link_state(ppd, HLS_VERIFY_CAP);
7447 lcb_shutdown(dd, 0);
7448 adjust_lcb_for_fpga_serdes(dd);
7450 read_vc_remote_phy(dd, &power_management, &continuous);
7451 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7452 &partner_supported_crc);
7453 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7454 read_remote_device_id(dd, &device_id, &device_rev);
7456 /* print the active widths */
7457 get_link_widths(dd, &active_tx, &active_rx);
7459 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7460 (int)power_management, (int)continuous);
7462 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7463 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7464 (int)partner_supported_crc);
7465 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7466 (u32)remote_tx_rate, (u32)link_widths);
7467 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7468 (u32)device_id, (u32)device_rev);
7470 * The peer vAU value just read is the peer receiver value. HFI does
7471 * not support a transmit vAU of 0 (AU == 8). We advertised that
7472 * with Z=1 in the fabric capabilities sent to the peer. The peer
7473 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7474 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7475 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7476 * subject to the Z value exception.
7480 set_up_vau(dd, vau);
7483 * Set VL15 credits to 0 in global credit register. Cache remote VL15
7484 * credits value and wait for link-up interrupt to set it.
7487 dd->vl15buf_cached = vl15buf;
7489 /* set up the LCB CRC mode */
7490 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7492 /* order is important: use the lowest bit in common */
7493 if (crc_mask & CAP_CRC_14B)
7494 crc_val = LCB_CRC_14B;
7495 else if (crc_mask & CAP_CRC_48B)
7496 crc_val = LCB_CRC_48B;
7497 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7498 crc_val = LCB_CRC_12B_16B_PER_LANE;
7500 crc_val = LCB_CRC_16B;
7502 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7503 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7504 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7506 /* set (14b only) or clear sideband credit */
7507 reg = read_csr(dd, SEND_CM_CTRL);
7508 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7509 write_csr(dd, SEND_CM_CTRL,
7510 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7512 write_csr(dd, SEND_CM_CTRL,
7513 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7516 ppd->link_speed_active = 0; /* invalid value */
7517 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7518 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7519 switch (remote_tx_rate) {
7521 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7524 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7528 /* actual rate is highest bit of the ANDed rates */
7529 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7532 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7534 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7536 if (ppd->link_speed_active == 0) {
7537 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7538 __func__, (int)remote_tx_rate);
7539 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7543 * Cache the values of the supported, enabled, and active
7544 * LTP CRC modes to return in 'portinfo' queries. But the bit
7545 * flags that are returned in the portinfo query differ from
7546 * what's in the link_crc_mask, crc_sizes, and crc_val
7547 * variables. Convert these here.
7549 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7550 /* supported crc modes */
7551 ppd->port_ltp_crc_mode |=
7552 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7553 /* enabled crc modes */
7554 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7555 /* active crc mode */
7557 /* set up the remote credit return table */
7558 assign_remote_cm_au_table(dd, vcu);
7561 * The LCB is reset on entry to handle_verify_cap(), so this must
7562 * be applied on every link up.
7564 * Adjust LCB error kill enable to kill the link if
7565 * these RBUF errors are seen:
7566 * REPLAY_BUF_MBE_SMASK
7567 * FLIT_INPUT_BUF_MBE_SMASK
7569 if (is_ax(dd)) { /* fixed in B0 */
7570 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7571 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7572 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7573 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7576 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7577 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7579 /* give 8051 access to the LCB CSRs */
7580 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7581 set_8051_lcb_access(dd);
7583 /* tell the 8051 to go to LinkUp */
7584 set_link_state(ppd, HLS_GOING_UP);
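/*
 * Sketch of the CRC negotiation and portinfo packing performed in
 * handle_verify_cap() above: the negotiated mode is the lowest CRC
 * capability bit both sides share (14B, then 48B, then per-lane),
 * falling back to the always-available 16b mode, and the result is
 * cached as supported << 8 | enabled << 4 | active. The constants are
 * stand-ins for the real CAP_/LCB_CRC_ values.
 */
#if 0
enum { X_CAP_14B = 0x1, X_CAP_48B = 0x2, X_CAP_PER_LANE = 0x4 };
enum { X_LCB_16B = 0, X_LCB_14B = 1, X_LCB_48B = 2, X_LCB_PER_LANE = 3 };

static int negotiate_crc(int local_enabled, int partner_supported)
{
	int common = local_enabled & partner_supported;

	if (common & X_CAP_14B)
		return X_LCB_14B;
	if (common & X_CAP_48B)
		return X_LCB_48B;
	if (common & X_CAP_PER_LANE)
		return X_LCB_PER_LANE;
	return X_LCB_16B;		/* nothing shared: implicit 16b */
}

static unsigned int pack_ltp_crc_mode(unsigned int supported,
				      unsigned int enabled,
				      unsigned int active)
{
	return (supported << 8) | (enabled << 4) | active;
}
#endif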
7588 * apply_link_downgrade_policy - Apply the link width downgrade enabled
7589 * policy against the current active link widths.
7590 * @ppd: info of physical HFI port
7591 * @refresh_widths: True indicates link downgrade event
7592 * @return: True indicates a successful link downgrade. False indicates
7593 * link downgrade event failed and the link will bounce back to
7594 * default link width.
7596 * Called when the enabled policy changes or the active link widths
7598 * Refresh_widths indicates that a link downgrade occurred. The
7599 * link_downgraded variable is set by refresh_widths and
7600 * determines the success/failure of the policy application.
7602 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7603 bool refresh_widths)
7609 bool link_downgraded = refresh_widths;
7611 /* use the hls lock to avoid a race with actual link up */
7614 mutex_lock(&ppd->hls_lock);
7615 /* only apply if the link is up */
7616 if (ppd->host_link_state & HLS_DOWN) {
7617 /* still going up... wait and retry */
7618 if (ppd->host_link_state & HLS_GOING_UP) {
7619 if (++tries < 1000) {
7620 mutex_unlock(&ppd->hls_lock);
7621 usleep_range(100, 120); /* arbitrary */
7625 "%s: giving up waiting for link state change\n",
7631 lwde = ppd->link_width_downgrade_enabled;
7633 if (refresh_widths) {
7634 get_link_widths(ppd->dd, &tx, &rx);
7635 ppd->link_width_downgrade_tx_active = tx;
7636 ppd->link_width_downgrade_rx_active = rx;
7639 if (ppd->link_width_downgrade_tx_active == 0 ||
7640 ppd->link_width_downgrade_rx_active == 0) {
7641 /* the 8051 reported a dead link as a downgrade */
7642 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7643 link_downgraded = false;
7644 } else if (lwde == 0) {
7645 /* downgrade is disabled */
7647 /* bounce if not at starting active width */
7648 if ((ppd->link_width_active !=
7649 ppd->link_width_downgrade_tx_active) ||
7650 (ppd->link_width_active !=
7651 ppd->link_width_downgrade_rx_active)) {
7653 "Link downgrade is disabled and link has downgraded, downing link\n");
7655 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7656 ppd->link_width_active,
7657 ppd->link_width_downgrade_tx_active,
7658 ppd->link_width_downgrade_rx_active);
7660 link_downgraded = false;
7662 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7663 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7664 /* Tx or Rx is outside the enabled policy */
7666 "Link is outside of downgrade allowed, downing link\n");
7668 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7669 lwde, ppd->link_width_downgrade_tx_active,
7670 ppd->link_width_downgrade_rx_active);
7672 link_downgraded = false;
7676 mutex_unlock(&ppd->hls_lock);
7679 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7680 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7681 set_link_state(ppd, HLS_DN_OFFLINE);
7685 return link_downgraded;
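/*
 * Sketch: the bounce decision above reduces to a small predicate.
 * lwde == 0 means downgrade is disabled, so any change from the
 * original active width forces a bounce; otherwise both active widths
 * must fall inside the enabled-policy mask.
 */
#if 0
static int violates_downgrade_policy(unsigned int lwde,
				     unsigned int lw_active,
				     unsigned int tx_active,
				     unsigned int rx_active)
{
	if (lwde == 0)
		return tx_active != lw_active || rx_active != lw_active;
	return !(lwde & tx_active) || !(lwde & rx_active);
}
#endif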
7689 * Handle a link downgrade interrupt from the 8051.
7691 * This is a work-queue function outside of the interrupt.
7693 void handle_link_downgrade(struct work_struct *work)
7695 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7696 link_downgrade_work);
7698 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7699 if (apply_link_downgrade_policy(ppd, true))
7700 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7703 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7705 return flag_string(buf, buf_len, flags, dcc_err_flags,
7706 ARRAY_SIZE(dcc_err_flags));
7709 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7711 return flag_string(buf, buf_len, flags, lcb_err_flags,
7712 ARRAY_SIZE(lcb_err_flags));
7715 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7717 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7718 ARRAY_SIZE(dc8051_err_flags));
7721 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7723 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7724 ARRAY_SIZE(dc8051_info_err_flags));
7727 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7729 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7730 ARRAY_SIZE(dc8051_info_host_msg_flags));
7733 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7735 struct hfi1_pportdata *ppd = dd->pport;
7736 u64 info, err, host_msg;
7737 int queue_link_down = 0;
7740 /* look at the flags */
7741 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7742 /* 8051 information set by firmware */
7743 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7744 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7745 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7746 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7748 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7749 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7752 * Handle error flags.
7754 if (err & FAILED_LNI) {
7756 * LNI error indications are cleared by the 8051
7757 * only when starting polling. Only pay attention
7758 * to them when in the states that occur during
7761 if (ppd->host_link_state
7762 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7763 queue_link_down = 1;
7764 dd_dev_info(dd, "Link error: %s\n",
7765 dc8051_info_err_string(buf,
7770 err &= ~(u64)FAILED_LNI;
7772 /* unknown frames can happen during LNI, just count */
7773 if (err & UNKNOWN_FRAME) {
7774 ppd->unknown_frame_count++;
7775 err &= ~(u64)UNKNOWN_FRAME;
7778 /* report remaining errors, but do not do anything */
7779 dd_dev_err(dd, "8051 info error: %s\n",
7780 dc8051_info_err_string(buf, sizeof(buf),
7785 * Handle host message flags.
7787 if (host_msg & HOST_REQ_DONE) {
7789 * Presently, the driver does a busy wait for
7790 * host requests to complete. This is only an
7791 * informational message.
7792 * NOTE: The 8051 clears the host message
7793 * information *on the next 8051 command*.
7794 * Therefore, when linkup is achieved,
7795 * this flag will still be set.
7797 host_msg &= ~(u64)HOST_REQ_DONE;
7799 if (host_msg & BC_SMA_MSG) {
7800 queue_work(ppd->link_wq, &ppd->sma_message_work);
7801 host_msg &= ~(u64)BC_SMA_MSG;
7803 if (host_msg & LINKUP_ACHIEVED) {
7804 dd_dev_info(dd, "8051: Link up\n");
7805 queue_work(ppd->link_wq, &ppd->link_up_work);
7806 host_msg &= ~(u64)LINKUP_ACHIEVED;
7808 if (host_msg & EXT_DEVICE_CFG_REQ) {
7809 handle_8051_request(ppd);
7810 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7812 if (host_msg & VERIFY_CAP_FRAME) {
7813 queue_work(ppd->link_wq, &ppd->link_vc_work);
7814 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7816 if (host_msg & LINK_GOING_DOWN) {
7817 const char *extra = "";
7818 /* no downgrade action needed if going down */
7819 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7820 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7821 extra = " (ignoring downgrade)";
7823 dd_dev_info(dd, "8051: Link down%s\n", extra);
7824 queue_link_down = 1;
7825 host_msg &= ~(u64)LINK_GOING_DOWN;
7827 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7828 queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7829 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7832 /* report remaining messages, but do not do anything */
7833 dd_dev_info(dd, "8051 info host message: %s\n",
7834 dc8051_info_host_msg_string(buf,
7839 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7841 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7843 * Lost the 8051 heartbeat. If this happens, we
7844 * receive constant interrupts about it. Disable
7845 * the interrupt after the first.
7847 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7848 write_csr(dd, DC_DC8051_ERR_EN,
7849 read_csr(dd, DC_DC8051_ERR_EN) &
7850 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7852 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7855 /* report the error, but do not do anything */
7856 dd_dev_err(dd, "8051 error: %s\n",
7857 dc8051_err_string(buf, sizeof(buf), reg));
7860 if (queue_link_down) {
7862 * if the link is already going down or disabled, do not
7863 * queue another. If there's a link down entry already
7864 * queued, don't queue another one.
7866 if ((ppd->host_link_state &
7867 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7868 ppd->link_enabled == 0) {
7869 dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7870 __func__, ppd->host_link_state,
7873 if (xchg(&ppd->is_link_down_queued, 1) == 1)
7875 "%s: link down request already queued\n",
7878 queue_work(ppd->link_wq, &ppd->link_down_work);
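/*
 * Sketch of the flag-processing idiom used throughout
 * handle_8051_interrupt(): test a bit, act on it, clear it from the
 * working copy, and report whatever remains as unexpected. Flag values
 * are illustrative.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define X_LINKUP	0x1ull
#define X_SMA_MSG	0x2ull

static void dispatch_flags(uint64_t flags)
{
	if (flags & X_LINKUP) {
		/* queue link-up work here */
		flags &= ~X_LINKUP;
	}
	if (flags & X_SMA_MSG) {
		/* queue SMA message work here */
		flags &= ~X_SMA_MSG;
	}
	if (flags)	/* leftovers indicate an unhandled source */
		printf("unhandled flags 0x%llx\n",
		       (unsigned long long)flags);
}
#endif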
7883 static const char * const fm_config_txt[] = {
7885 "BadHeadDist: Distance violation between two head flits",
7887 "BadTailDist: Distance violation between two tail flits",
7889 "BadCtrlDist: Distance violation between two credit control flits",
7891 "BadCrdAck: Credits return for unsupported VL",
7893 "UnsupportedVLMarker: Received VL Marker",
7895 "BadPreempt: Exceeded the preemption nesting level",
7897 "BadControlFlit: Received unsupported control flit",
7900 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7903 static const char * const port_rcv_txt[] = {
7905 "BadPktLen: Illegal PktLen",
7907 "PktLenTooLong: Packet longer than PktLen",
7909 "PktLenTooShort: Packet shorter than PktLen",
7911 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7913 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7915 "BadL2: Illegal L2 opcode",
7917 "BadSC: Unsupported SC",
7919 "BadRC: Illegal RC",
7921 "PreemptError: Preempting with same VL",
7923 "PreemptVL15: Preempting a VL15 packet",
7926 #define OPA_LDR_FMCONFIG_OFFSET 16
7927 #define OPA_LDR_PORTRCV_OFFSET 0
7928 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7930 u64 info, hdr0, hdr1;
7933 struct hfi1_pportdata *ppd = dd->pport;
7937 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7938 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7939 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7940 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7941 /* set status bit */
7942 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7944 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7947 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7948 struct hfi1_pportdata *ppd = dd->pport;
7949 /* this counter saturates at (2^32) - 1 */
7950 if (ppd->link_downed < (u32)UINT_MAX)
7952 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7955 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7956 u8 reason_valid = 1;
7958 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7959 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7960 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7961 /* set status bit */
7962 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7972 extra = fm_config_txt[info];
7975 extra = fm_config_txt[info];
7976 if (ppd->port_error_action &
7977 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7980 * lcl_reason cannot be derived from info
7984 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7989 snprintf(buf, sizeof(buf), "reserved%lld", info);
7994 if (reason_valid && !do_bounce) {
7995 do_bounce = ppd->port_error_action &
7996 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7997 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
8000 /* just report this */
8001 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
8003 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
8006 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8007 u8 reason_valid = 1;
8009 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8010 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8011 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8012 if (!(dd->err_info_rcvport.status_and_code &
8013 OPA_EI_STATUS_SMASK)) {
8014 dd->err_info_rcvport.status_and_code =
8015 info & OPA_EI_CODE_SMASK;
8016 /* set status bit */
8017 dd->err_info_rcvport.status_and_code |=
8018 OPA_EI_STATUS_SMASK;
8020 * save first 2 flits in the packet that caused
8023 dd->err_info_rcvport.packet_flit1 = hdr0;
8024 dd->err_info_rcvport.packet_flit2 = hdr1;
8037 extra = port_rcv_txt[info];
8041 snprintf(buf, sizeof(buf), "reserved%lld", info);
8046 if (reason_valid && !do_bounce) {
8047 do_bounce = ppd->port_error_action &
8048 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8049 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8052 /* just report this */
8053 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8054 " hdr0 0x%llx, hdr1 0x%llx\n",
8057 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8060 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8061 /* informative only */
8062 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8063 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8065 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8066 /* informative only */
8067 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8068 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8071 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8072 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8074 /* report any remaining errors */
8076 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8077 dcc_err_string(buf, sizeof(buf), reg));
8079 if (lcl_reason == 0)
8080 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8083 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8085 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8086 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8090 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8094 dd_dev_info(dd, "LCB Error: %s\n",
8095 lcb_err_string(buf, sizeof(buf), reg));
8099 * CCE block DC interrupt. Source is < 8.
8101 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8103 const struct err_reg_info *eri = &dc_errs[source];
8106 interrupt_clear_down(dd, 0, eri);
8107 } else if (source == 3 /* dc_lbm_int */) {
8109 * This indicates that a parity error has occurred on the
8110 * address/control lines presented to the LBM. The error
8111 * is a single pulse, there is no associated error flag,
8112 * and it is non-maskable. This is because if a parity
8113 * error occurs on the request, the request is dropped.
8114 * This should never occur, but it is nice to know if it
8117 dd_dev_err(dd, "Parity error in DC LBM block\n");
8119 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8124 * TX block send credit interrupt. Source is < 160.
8126 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8128 sc_group_release_update(dd, source);
8132 * TX block SDMA interrupt. Source is < 48.
8134 * SDMA interrupts are grouped by type:
8137 * N - 2N-1 = SDmaProgress
8138 * 2N - 3N-1 = SDmaIdle
8140 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8142 /* what interrupt */
8143 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8145 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8147 #ifdef CONFIG_SDMA_VERBOSITY
8148 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8149 slashstrip(__FILE__), __LINE__, __func__);
8150 sdma_dumpstate(&dd->per_sdma[which]);
8153 if (likely(what < 3 && which < dd->num_sdma)) {
8154 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8156 /* should not happen */
8157 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
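/*
 * Sketch: a combined SDMA interrupt source decodes into (type, engine)
 * with N = TXE_NUM_SDMA_ENGINES, matching the grouping above -- the
 * first N sources are SDma, the next N SDmaProgress, and the last N
 * SDmaIdle.
 */
#if 0
static void decode_sdma_source(unsigned int source, unsigned int n_engines,
			       unsigned int *what, unsigned int *which)
{
	*what = source / n_engines;	/* interrupt type, 0..2 */
	*which = source % n_engines;	/* engine index */
}
#endif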
8162 * is_rcv_avail_int() - User receive context available IRQ handler
8164 * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8166 * RX block receive available interrupt. Source is < 160.
8168 * This is the general interrupt handler for user (PSM) receive contexts,
8169 * and can only be used for non-threaded IRQs.
8171 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8173 struct hfi1_ctxtdata *rcd;
8176 if (likely(source < dd->num_rcv_contexts)) {
8177 rcd = hfi1_rcd_get_by_index(dd, source);
8179 handle_user_interrupt(rcd);
8183 /* received an interrupt, but no rcd */
8184 err_detail = "dataless";
8186 /* received an interrupt, but are not using that context */
8187 err_detail = "out of range";
8189 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8190 err_detail, source);
8194 * is_rcv_urgent_int() - User receive context urgent IRQ handler
8196 * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8198 * RX block receive urgent interrupt. Source is < 160.
8200 * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8202 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8204 struct hfi1_ctxtdata *rcd;
8207 if (likely(source < dd->num_rcv_contexts)) {
8208 rcd = hfi1_rcd_get_by_index(dd, source);
8210 handle_user_interrupt(rcd);
8214 /* received an interrupt, but no rcd */
8215 err_detail = "dataless";
8217 /* received an interrupt, but are not using that context */
8218 err_detail = "out of range";
8220 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8221 err_detail, source);
8225 * Reserved range interrupt. Should not be called in normal operation.
8227 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8231 dd_dev_err(dd, "unexpected %s interrupt\n",
8232 is_reserved_name(name, sizeof(name), source));
8235 static const struct is_table is_table[] = {
8238 * name func interrupt func
8240 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8241 is_misc_err_name, is_misc_err_int },
8242 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8243 is_sdma_eng_err_name, is_sdma_eng_err_int },
8244 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8245 is_sendctxt_err_name, is_sendctxt_err_int },
8246 { IS_SDMA_START, IS_SDMA_END,
8247 is_sdma_eng_name, is_sdma_eng_int },
8248 { IS_VARIOUS_START, IS_VARIOUS_END,
8249 is_various_name, is_various_int },
8250 { IS_DC_START, IS_DC_END,
8251 is_dc_name, is_dc_int },
8252 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8253 is_rcv_avail_name, is_rcv_avail_int },
8254 { IS_RCVURGENT_START, IS_RCVURGENT_END,
8255 is_rcv_urgent_name, is_rcv_urgent_int },
8256 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8257 is_send_credit_name, is_send_credit_int},
8258 { IS_RESERVED_START, IS_RESERVED_END,
8259 is_reserved_name, is_reserved_int},
8263 * Interrupt source interrupt - called when the given source has an interrupt.
8264 * Source is a bit index into an array of 64-bit integers.
8266 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8268 const struct is_table *entry;
8270 /* avoids a double compare by walking the table in-order */
8271 for (entry = &is_table[0]; entry->is_name; entry++) {
8272 if (source < entry->end) {
8273 trace_hfi1_interrupt(dd, entry, source);
8274 entry->is_int(dd, source - entry->start);
8278 /* fell off the end */
8279 dd_dev_err(dd, "invalid interrupt source %u\n", source);
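/*
 * Sketch of the sorted range-table dispatch in is_interrupt(): because
 * the entries are ordered by range, a single "source < end" test per
 * entry finds the owning range, and the handler receives a
 * range-relative offset. The end-exclusive convention is assumed here.
 */
#if 0
struct range_entry {
	unsigned int start, end;	/* [start, end) */
	void (*handler)(unsigned int offset);
};

static void range_dispatch(const struct range_entry *tbl, unsigned int n,
			   unsigned int source)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (source < tbl[i].end) {
			tbl[i].handler(source - tbl[i].start);
			return;
		}
	}
	/* fell off the end: invalid source */
}
#endif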
8283 * general_interrupt() - General interrupt handler
8284 * @irq: MSIx IRQ vector
8285 * @data: hfi1 devdata
8287 * This is able to correctly handle all non-threaded interrupts. Receive
8288 * context DATA IRQs are threaded and are not supported by this handler.
8291 static irqreturn_t general_interrupt(int irq, void *data)
8293 struct hfi1_devdata *dd = data;
8294 u64 regs[CCE_NUM_INT_CSRS];
8297 irqreturn_t handled = IRQ_NONE;
8299 this_cpu_inc(*dd->int_counter);
8301 /* phase 1: scan and clear all handled interrupts */
8302 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8303 if (dd->gi_mask[i] == 0) {
8304 regs[i] = 0; /* used later */
8307 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8309 /* only clear if anything is set */
8311 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8314 /* phase 2: call the appropriate handler */
8315 for_each_set_bit(bit, (unsigned long *)®s[0],
8316 CCE_NUM_INT_CSRS * 64) {
8317 is_interrupt(dd, bit);
8318 handled = IRQ_HANDLED;
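/*
 * Sketch of the two-phase handling in general_interrupt(): snapshot
 * and clear every status CSR first, then walk the snapshot as one long
 * bitmap so each pending source is serviced exactly once per IRQ.
 * Register count and accessors are illustrative.
 */
#if 0
#include <stdint.h>

static void two_phase(int n_csrs,		/* n_csrs <= 16 assumed */
		      uint64_t (*read_status)(int),
		      void (*clear)(int, uint64_t),
		      void (*handle)(unsigned int))
{
	uint64_t snap[16];
	unsigned int bit;
	int i;

	for (i = 0; i < n_csrs; i++) {
		snap[i] = read_status(i);
		if (snap[i])
			clear(i, snap[i]);	/* ack only what we saw */
	}
	for (i = 0; i < n_csrs; i++)
		for (bit = 0; bit < 64; bit++)
			if (snap[i] & (1ull << bit))
				handle(i * 64 + bit);
}
#endif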
8324 static irqreturn_t sdma_interrupt(int irq, void *data)
8326 struct sdma_engine *sde = data;
8327 struct hfi1_devdata *dd = sde->dd;
8330 #ifdef CONFIG_SDMA_VERBOSITY
8331 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8332 slashstrip(__FILE__), __LINE__, __func__);
8333 sdma_dumpstate(sde);
8336 this_cpu_inc(*dd->int_counter);
8338 /* This read_csr is really bad in the hot path */
8339 status = read_csr(dd,
8340 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8342 if (likely(status)) {
8343 /* clear the interrupt(s) */
8345 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8348 /* handle the interrupt(s) */
8349 sdma_engine_interrupt(sde, status);
8351 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8358 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8359 * to ensure that the write completed. This does NOT guarantee that
8360 * queued DMA writes to memory from the chip are pushed.
8362 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8364 struct hfi1_devdata *dd = rcd->dd;
8365 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8367 mmiowb(); /* make sure everything before is written */
8368 write_csr(dd, addr, rcd->imask);
8369 /* force the above write on the chip and get a value back */
8370 (void)read_csr(dd, addr);
8373 /* force the receive interrupt */
8374 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8376 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8380 * Return non-zero if a packet is present.
8382 * This routine is called when rechecking for packets after the RcvAvail
8383 * interrupt has been cleared down. First, do a quick check of memory for
8384 * a packet present. If not found, use an expensive CSR read of the context
8385 * tail to determine the actual tail. The CSR read is necessary because there
8386 * is no method to push pending DMAs to memory other than an interrupt and we
8387 * are trying to determine if we need to force an interrupt.
8389 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8394 if (!rcd->rcvhdrtail_kvaddr)
8395 present = (rcd->seq_cnt ==
8396 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8397 else /* is RDMA rtail */
8398 present = (rcd->head != get_rcvhdrtail(rcd));
8403 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8404 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8405 return rcd->head != tail;
8409 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8410 * This routine will try to handle packets immediately (latency), but if
8411 * it finds too many, it will invoke the thread handler (bandwidth). The
8412 * chip receive interrupt is *not* cleared down until this or the thread (if
8413 * invoked) is finished. The intent is to avoid extra interrupts while we
8414 * are processing packets anyway.
8416 static irqreturn_t receive_context_interrupt(int irq, void *data)
8418 struct hfi1_ctxtdata *rcd = data;
8419 struct hfi1_devdata *dd = rcd->dd;
8423 trace_hfi1_receive_interrupt(dd, rcd);
8424 this_cpu_inc(*dd->int_counter);
8425 aspm_ctx_disable(rcd);
8427 /* receive interrupt remains blocked while processing packets */
8428 disposition = rcd->do_interrupt(rcd, 0);
8431 * Too many packets were seen while processing packets in this
8432 * IRQ handler. Invoke the handler thread. The receive interrupt
8435 if (disposition == RCV_PKT_LIMIT)
8436 return IRQ_WAKE_THREAD;
8439 * The packet processor detected no more packets. Clear the receive
8440 * interrupt and recheck for a packet that may have arrived
8441 * after the previous check and interrupt clear. If a packet arrived,
8442 * force another interrupt.
8444 clear_recv_intr(rcd);
8445 present = check_packet_present(rcd);
8447 force_recv_intr(rcd);
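/*
 * Sketch of the race-avoidance idiom above: clear the interrupt first,
 * then re-check for work; if a packet slipped in during the window,
 * force a fresh interrupt instead of leaving it stranded with no IRQ
 * pending. The helpers are hypothetical stand-ins for the CSR
 * accessors.
 */
#if 0
static void clear_irq(void)    { /* write clear CSR, read back to flush */ }
static int  work_pending(void) { return 0; /* check ring head vs. tail */ }
static void force_irq(void)    { /* write interrupt-force CSR */ }

static void finish_processing(void)
{
	clear_irq();
	if (work_pending())
		force_irq();	/* guarantees another handler pass */
}
#endif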
8453 * Receive packet thread handler. This expects to be invoked with the
8454 * receive interrupt still blocked.
8456 static irqreturn_t receive_context_thread(int irq, void *data)
8458 struct hfi1_ctxtdata *rcd = data;
8461 /* receive interrupt is still blocked from the IRQ handler */
8462 (void)rcd->do_interrupt(rcd, 1);
8465 * The packet processor will only return if it detected no more
8466 * packets. Hold IRQs here so we can safely clear the interrupt and
8467 * recheck for a packet that may have arrived after the previous
8468 * check and the interrupt clear. If a packet arrived, force another
8471 local_irq_disable();
8472 clear_recv_intr(rcd);
8473 present = check_packet_present(rcd);
8475 force_recv_intr(rcd);
8481 /* ========================================================================= */
8483 u32 read_physical_state(struct hfi1_devdata *dd)
8487 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8488 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8489 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8492 u32 read_logical_state(struct hfi1_devdata *dd)
8496 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8497 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8498 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8501 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8505 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8506 /* clear current state, set new state */
8507 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8508 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8509 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8513 * Use the 8051 to read an LCB CSR.
8515 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8520 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8521 if (acquire_lcb_access(dd, 0) == 0) {
8522 *data = read_csr(dd, addr);
8523 release_lcb_access(dd, 0);
8529 /* register is an index of LCB registers: (offset - base) / 8 */
8530 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8531 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8532 if (ret != HCMD_SUCCESS)
8538 * Provide a cache for some of the LCB registers in case the LCB is
8540 * (The LCB is unavailable in certain link states, for example.)
8547 static struct lcb_datum lcb_cache[] = {
8548 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8549 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8550 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8553 static void update_lcb_cache(struct hfi1_devdata *dd)
8559 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8560 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8562 /* Update if we get good data */
8563 if (likely(ret != -EBUSY))
8564 lcb_cache[i].val = val;
8568 static int read_lcb_cache(u32 off, u64 *val)
8572 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8573 if (lcb_cache[i].off == off) {
8574 *val = lcb_cache[i].val;
8579 pr_warn("%s bad offset 0x%x\n", __func__, off);
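/*
 * Sketch: lcb_cache[] above is a small linear map keyed by CSR offset;
 * read_lcb_csr() falls back to it only while the LCB itself is
 * unreachable.
 */
#if 0
struct csr_cache { unsigned int off; unsigned long long val; };

static int cache_lookup(const struct csr_cache *c, unsigned int n,
			unsigned int off, unsigned long long *val)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (c[i].off == off) {
			*val = c[i].val;
			return 0;	/* hit */
		}
	}
	return -1;			/* miss: offset not cached */
}
#endif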
8584 * Read an LCB CSR. Access may not be in host control, so check.
8585 * Return 0 on success, -EBUSY on failure.
8587 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8589 struct hfi1_pportdata *ppd = dd->pport;
8591 /* if up, go through the 8051 for the value */
8592 if (ppd->host_link_state & HLS_UP)
8593 return read_lcb_via_8051(dd, addr, data);
8594 /* if going up or down, check the cache, otherwise, no access */
8595 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8596 if (read_lcb_cache(addr, data))
8601 /* otherwise, host has access */
8602 *data = read_csr(dd, addr);
8607 * Use the 8051 to write an LCB CSR.
8609 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8614 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8615 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8616 if (acquire_lcb_access(dd, 0) == 0) {
8617 write_csr(dd, addr, data);
8618 release_lcb_access(dd, 0);
8624 /* register is an index of LCB registers: (offset - base) / 8 */
8625 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8626 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8627 if (ret != HCMD_SUCCESS)
8633 * Write an LCB CSR. Access may not be in host control, so check.
8634 * Return 0 on success, -EBUSY on failure.
8636 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8638 struct hfi1_pportdata *ppd = dd->pport;
8640 /* if up, go through the 8051 for the value */
8641 if (ppd->host_link_state & HLS_UP)
8642 return write_lcb_via_8051(dd, addr, data);
8643 /* if going up or down, no access */
8644 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8646 /* otherwise, host has access */
8647 write_csr(dd, addr, data);
8653 * < 0 = Linux error, not able to get access
8654 * > 0 = 8051 command RETURN_CODE
8656 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8661 unsigned long timeout;
8663 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8665 mutex_lock(&dd->dc8051_lock);
8667 /* We can't send any commands to the 8051 if it's in reset */
8668 if (dd->dc_shutdown) {
8669 return_code = -ENODEV;
8674 * If an 8051 host command timed out previously, then the 8051 is
8677 * On first timeout, attempt to reset and restart the entire DC
8678 * block (including 8051). (Is this too big of a hammer?)
8680 * If the 8051 times out a second time, the reset did not bring it
8681 * back to healthy life. In that case, fail any subsequent commands.
8683 if (dd->dc8051_timed_out) {
8684 if (dd->dc8051_timed_out > 1) {
8686 "Previous 8051 host command timed out, skipping command %u\n",
8688 return_code = -ENXIO;
8696 * If there is no timeout, then the 8051 command interface is
8697 * waiting for a command.
8701 * When writing an LCB CSR, out_data contains the full value to
8702 * be written, while in_data contains the relative LCB
8703 * address in 7:0. Do the work here, rather than the caller,
8704 * of distributing the write data to where it needs to go:
8707 * 39:00 -> in_data[47:8]
8708 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8709 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8711 if (type == HCMD_WRITE_LCB_CSR) {
8712 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8713 /* must preserve COMPLETED - it is tied to hardware */
8714 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8715 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8716 reg |= ((((*out_data) >> 40) & 0xff) <<
8717 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8718 | ((((*out_data) >> 48) & 0xffff) <<
8719 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8720 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8724 * Do two writes: the first to stabilize the type and req_data, the
8725 * second to activate.
8727 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8728 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8729 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8730 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8731 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8732 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8733 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8735 /* wait for completion, alternate: interrupt */
8736 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8738 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8739 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8742 if (time_after(jiffies, timeout)) {
8743 dd->dc8051_timed_out++;
8744 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8747 return_code = -ETIMEDOUT;
8754 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8755 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8756 if (type == HCMD_READ_LCB_CSR) {
8757 /* top 16 bits are in a different register */
8758 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8759 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8761 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8764 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8765 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8766 dd->dc8051_timed_out = 0;
8768 * Clear command for next user.
8770 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8773 mutex_unlock(&dd->dc8051_lock);
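/*
 * Sketch of the HCMD_WRITE_LCB_CSR data distribution described in the
 * comment above: write-data bits 39:0 ride in in_data[47:8] alongside
 * the LCB index in [7:0], while bits 47:40 and 63:48 are staged in the
 * EXT_DEV_0 RETURN_CODE and RSP_DATA fields. The field positions in
 * ext_dev_0 below are assumptions for illustration.
 */
#if 0
#include <stdint.h>

static void split_lcb_write(uint64_t lcb_index, uint64_t data,
			    uint64_t *in_data, uint64_t *ext_dev_0)
{
	*in_data = (lcb_index & 0xff) | ((data & 0xffffffffffull) << 8);
	*ext_dev_0 = (((data >> 40) & 0xff) << 8)	/* RETURN_CODE */
		   | (((data >> 48) & 0xffff) << 16);	/* RSP_DATA */
}
#endif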
8777 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8779 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8782 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8783 u8 lane_id, u32 config_data)
8788 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8789 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8790 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8791 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8792 if (ret != HCMD_SUCCESS) {
8794 "load 8051 config: field id %d, lane %d, err %d\n",
8795 (int)field_id, (int)lane_id, ret);
8801 * Read the 8051 firmware "registers". Use the RAM directly. Always
8802 * set the result, even on error.
8803 * Return 0 on success, -errno on failure
8805 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8812 /* address start depends on the lane_id */
8814 addr = (4 * NUM_GENERAL_FIELDS)
8815 + (lane_id * 4 * NUM_LANE_FIELDS);
8818 addr += field_id * 4;
8820 /* read is in 8-byte chunks, hardware will truncate the address down */
8821 ret = read_8051_data(dd, addr, 8, &big_data);
8824 /* extract the 4 bytes we want */
8826 *result = (u32)(big_data >> 32);
8828 *result = (u32)big_data;
8831 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8832 __func__, lane_id, field_id);
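/*
 * Sketch of the 8051 config-RAM addressing used above: fields are
 * 32-bit words, lane fields follow the general-field block, the
 * hardware reads 8-byte-aligned chunks, and address bit 2 selects
 * which half of the chunk holds the wanted word. The GENERAL_CONFIG
 * lane handling is elided in the excerpt, so this only models the
 * per-lane path.
 */
#if 0
#include <stdint.h>

static unsigned int field_addr(unsigned int lane_id, unsigned int field_id,
			       unsigned int n_general, unsigned int n_lane)
{
	unsigned int addr = 4 * n_general + lane_id * 4 * n_lane;

	return addr + field_id * 4;
}

static uint32_t extract_word(uint64_t big_data, unsigned int addr)
{
	return (addr & 0x4) ? (uint32_t)(big_data >> 32)
			    : (uint32_t)big_data;
}
#endif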
8838 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8843 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8844 | power_management << POWER_MANAGEMENT_SHIFT;
8845 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8846 GENERAL_CONFIG, frame);
8849 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8850 u16 vl15buf, u8 crc_sizes)
8854 frame = (u32)vau << VAU_SHIFT
8856 | (u32)vcu << VCU_SHIFT
8857 | (u32)vl15buf << VL15BUF_SHIFT
8858 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8859 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8860 GENERAL_CONFIG, frame);
8863 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8864 u8 *flag_bits, u16 *link_widths)
8868 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8870 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8871 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8872 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8875 static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8882 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8883 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8884 | (u32)link_widths << LINK_WIDTH_SHIFT;
8885 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8889 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8894 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8895 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8896 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8899 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8904 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8905 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8906 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8907 & REMOTE_DEVICE_REV_MASK;
8910 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8915 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8916 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8917 /* Clear, then set field */
8919 frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8920 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8924 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8929 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8930 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8931 STS_FM_VERSION_MAJOR_MASK;
8932 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8933 STS_FM_VERSION_MINOR_MASK;
8935 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8936 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8937 STS_FM_VERSION_PATCH_MASK;
8940 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8945 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8946 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8947 & POWER_MANAGEMENT_MASK;
8948 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8949 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8952 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8953 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8957 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8958 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8959 *z = (frame >> Z_SHIFT) & Z_MASK;
8960 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8961 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8962 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8965 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8971 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8973 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8974 & REMOTE_TX_RATE_MASK;
8975 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8978 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8982 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8983 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8986 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8988 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8991 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8993 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8996 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
9002 if (dd->pport->host_link_state & HLS_UP) {
9003 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9006 *link_quality = (frame >> LINK_QUALITY_SHIFT)
9007 & LINK_QUALITY_MASK;
9011 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9015 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9016 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9019 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9023 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9024 *ldr = (frame & 0xff);
9027 static int read_tx_settings(struct hfi1_devdata *dd,
9029 u8 *tx_polarity_inversion,
9030 u8 *rx_polarity_inversion,
9036 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9037 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9038 & ENABLE_LANE_TX_MASK;
9039 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9040 & TX_POLARITY_INVERSION_MASK;
9041 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9042 & RX_POLARITY_INVERSION_MASK;
9043 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9047 static int write_tx_settings(struct hfi1_devdata *dd,
9049 u8 tx_polarity_inversion,
9050 u8 rx_polarity_inversion,
9055 /* no need to mask, all variable sizes match field widths */
9056 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9057 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9058 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9059 | max_rate << MAX_RATE_SHIFT;
9060 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9064 * Read an idle LCB message.
9066 * Returns 0 on success, -EINVAL on error
9068 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9072 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9073 if (ret != HCMD_SUCCESS) {
9074 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9078 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9079 /* return only the payload as we already know the type */
9080 *data_out >>= IDLE_PAYLOAD_SHIFT;
9085 * Read an idle SMA message. To be done in response to a notification from
9088 * Returns 0 on success, -EINVAL on error
9090 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9092 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9097 * Send an idle LCB message.
9099 * Returns 0 on success, -EINVAL on error
9101 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9105 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9106 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9107 if (ret != HCMD_SUCCESS) {
9108 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9116 * Send an idle SMA message.
9118 * Returns 0 on success, -EINVAL on error
9120 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9124 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9125 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9126 return send_idle_message(dd, data);
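/*
 * Sketch: an idle SMA message is assembled exactly as above -- payload
 * in the low field, message type above it. The shift and mask values
 * are stand-ins for the IDLE_* constants.
 */
#if 0
#include <stdint.h>

static uint64_t build_idle_msg(uint64_t type, uint64_t payload,
			       uint64_t payload_mask,
			       unsigned int payload_shift,
			       unsigned int type_shift)
{
	return ((payload & payload_mask) << payload_shift) |
	       (type << type_shift);
}
#endif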
9130 * Initialize the LCB then do a quick link up. This may or may not be
9133 * return 0 on success, -errno on error
9135 static int do_quick_linkup(struct hfi1_devdata *dd)
9139 lcb_shutdown(dd, 0);
9142 /* LCB_CFG_LOOPBACK.VAL = 2 */
9143 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9144 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9145 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9146 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9149 /* start the LCBs */
9150 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9151 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9153 /* simulator only loopback steps */
9154 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9155 /* LCB_CFG_RUN.EN = 1 */
9156 write_csr(dd, DC_LCB_CFG_RUN,
9157 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9159 ret = wait_link_transfer_active(dd, 10);
9163 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9164 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9169 * When doing quick linkup and not in loopback, both
9170 * sides must be done with LCB set-up before either
9171 * starts the quick linkup. Put a delay here so that
9172 * both sides can be started and have a chance to be
9173 * done with LCB set up before resuming.
9176 "Pausing for peer to be finished with LCB set up\n");
9178 dd_dev_err(dd, "Continuing with quick linkup\n");
9181 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9182 set_8051_lcb_access(dd);
9185 * State "quick" LinkUp request sets the physical link state to
9186 * LinkUp without a verify capability sequence.
9187 * This state is in simulator v37 and later.
9189 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9190 if (ret != HCMD_SUCCESS) {
9192 "%s: set physical link state to quick LinkUp failed with return %d\n",
9195 set_host_lcb_access(dd);
9196 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9203 return 0; /* success */
9207 * Do all special steps to set up loopback.
9209 static int init_loopback(struct hfi1_devdata *dd)
9211 dd_dev_info(dd, "Entering loopback mode\n");
9213 /* all loopbacks should disable self GUID check */
9214 write_csr(dd, DC_DC8051_CFG_MODE,
9215 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9218 * The simulator has only one loopback option - LCB. Switch
9219 * to that option, which includes quick link up.
9221 * Accept all valid loopback values.
9223 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9224 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9225 loopback == LOOPBACK_CABLE)) {
9226 loopback = LOOPBACK_LCB;
9232 * SerDes loopback init sequence is handled in set_local_link_attributes
9234 if (loopback == LOOPBACK_SERDES)
9237 /* LCB loopback - handled at poll time */
9238 if (loopback == LOOPBACK_LCB) {
9239 quick_linkup = 1; /* LCB is always quick linkup */
9241 /* not supported in emulation due to emulation RTL changes */
9242 if (dd->icode == ICODE_FPGA_EMULATION) {
9244 "LCB loopback not supported in emulation\n");
9250 /* external cable loopback requires no extra steps */
9251 if (loopback == LOOPBACK_CABLE)
9254 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9259 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9260 * used in the Verify Capability link width attribute.
9262 static u16 opa_to_vc_link_widths(u16 opa_widths)
9267 static const struct link_bits {
9270 } opa_link_xlate[] = {
9271 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9272 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9273 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9274 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9277 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9278 if (opa_widths & opa_link_xlate[i].from)
9279 result |= opa_link_xlate[i].to;
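/*
 * Sketch: the translation above maps each OPA width flag to a
 * "one bit per lane count" VC mask; keeping it as an explicit table
 * guards against either encoding changing. Standalone check, with the
 * OPA flag values assumed to be 1 << (nX - 1):
 */
#if 0
#include <assert.h>

static unsigned int x_opa_to_vc(unsigned int opa_widths)
{
	static const struct { unsigned int from, to; } xlate[] = {
		{ 0x1, 1 << 0 },	/* 1X */
		{ 0x2, 1 << 1 },	/* 2X */
		{ 0x4, 1 << 2 },	/* 3X */
		{ 0x8, 1 << 3 },	/* 4X */
	};
	unsigned int i, result = 0;

	for (i = 0; i < sizeof(xlate) / sizeof(xlate[0]); i++)
		if (opa_widths & xlate[i].from)
			result |= xlate[i].to;
	return result;
}

int main(void)
{
	assert(x_opa_to_vc(0x1 | 0x8) == 0x9);	/* 1X + 4X */
	return 0;
}
#endif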
9285 * Set link attributes before moving to polling.
9287 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9289 struct hfi1_devdata *dd = ppd->dd;
9291 u8 tx_polarity_inversion;
9292 u8 rx_polarity_inversion;
9295 /* reset our fabric serdes to clear any lingering problems */
9296 fabric_serdes_reset(dd);
9298 /* set the local tx rate - need to read-modify-write */
9299 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9300 &rx_polarity_inversion, &ppd->local_tx_rate);
9302 goto set_local_link_attributes_fail;
9304 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9305 /* set the tx rate to the fastest enabled */
9306 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9307 ppd->local_tx_rate = 1;
9309 ppd->local_tx_rate = 0;
9311 /* set the tx rate to all enabled */
9312 ppd->local_tx_rate = 0;
9313 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9314 ppd->local_tx_rate |= 2;
9315 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9316 ppd->local_tx_rate |= 1;
9319 enable_lane_tx = 0xF; /* enable all four lanes */
9320 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9321 rx_polarity_inversion, ppd->local_tx_rate);
9322 if (ret != HCMD_SUCCESS)
9323 goto set_local_link_attributes_fail;
9325 ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9326 if (ret != HCMD_SUCCESS) {
9328 "Failed to set host interface version, return 0x%x\n",
9330 goto set_local_link_attributes_fail;
9334 * DC supports continuous updates.
9336 ret = write_vc_local_phy(dd,
9337 0 /* no power management */,
9338 1 /* continuous updates */);
9339 if (ret != HCMD_SUCCESS)
9340 goto set_local_link_attributes_fail;
9342 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9343 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9344 ppd->port_crc_mode_enabled);
9345 if (ret != HCMD_SUCCESS)
9346 goto set_local_link_attributes_fail;
9349 * SerDes loopback init sequence requires
9350 * setting bit 0 of MISC_CONFIG_BITS
9352 if (loopback == LOOPBACK_SERDES)
9353 misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9356 * An external device configuration request is used to reset the LCB
9357 * to retry to obtain operational lanes when the first attempt is
9360 if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9361 misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9363 ret = write_vc_local_link_mode(dd, misc_bits, 0,
9364 opa_to_vc_link_widths(
9365 ppd->link_width_enabled));
9366 if (ret != HCMD_SUCCESS)
9367 goto set_local_link_attributes_fail;
9369 /* let peer know who we are */
9370 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9371 if (ret == HCMD_SUCCESS)
9374 set_local_link_attributes_fail:
9376 "Failed to set local link attributes, return 0x%x\n",
9382 * Call this to start the link.
9383 * Do not do anything if the link is disabled.
9384 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9386 int start_link(struct hfi1_pportdata *ppd)
9389 * Tune the SerDes to a ballpark setting for optimal signal and bit
9390 * error rate. Needs to be done before starting the link.
9394 if (!ppd->driver_link_ready) {
9395 dd_dev_info(ppd->dd,
9396 "%s: stopping link start because driver is not ready\n",
9402 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9403 * pkey table can be configured properly if the HFI unit is connected
9404 * to a switch port with MgmtAllowed=NO
9406 clear_full_mgmt_pkey(ppd);
9408 return set_link_state(ppd, HLS_DN_POLL);
9411 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9413 struct hfi1_devdata *dd = ppd->dd;
9415 unsigned long timeout;
9418 * Some QSFP cables have a quirk that asserts the IntN line as a side
9419 * effect of power up on plug-in. We ignore this false positive
9420 * interrupt until the module has finished powering up by waiting for
9421 * a minimum timeout of the module inrush initialization time of
9422 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9423 * module have stabilized.
9428 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9430 timeout = jiffies + msecs_to_jiffies(2000);
9432 mask = read_csr(dd, dd->hfi1_id ?
9433 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9434 if (!(mask & QSFP_HFI0_INT_N))
9436 if (time_after(jiffies, timeout)) {
9437 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9445 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9447 struct hfi1_devdata *dd = ppd->dd;
9450 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9453 * Clear the status register to avoid an immediate interrupt
9454 * when we re-enable the IntN pin
9456 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9458 mask |= (u64)QSFP_HFI0_INT_N;
9460 mask &= ~(u64)QSFP_HFI0_INT_N;
9462 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9465 int reset_qsfp(struct hfi1_pportdata *ppd)
9467 struct hfi1_devdata *dd = ppd->dd;
9468 u64 mask, qsfp_mask;
9470 /* Disable INT_N from triggering QSFP interrupts */
9471 set_qsfp_int_n(ppd, 0);
9473 /* Reset the QSFP */
9474 mask = (u64)QSFP_HFI0_RESET_N;
9476 qsfp_mask = read_csr(dd,
9477 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9480 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9486 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9488 wait_for_qsfp_init(ppd);
9491 * Allow INT_N to trigger the QSFP interrupt to watch
9492 * for alarms and warnings
9494 set_qsfp_int_n(ppd, 1);
9497 * After the reset, AOC transmitters are enabled by default. They need
9498 * to be turned off to complete the QSFP setup before they can be
9501 return set_qsfp_tx(ppd, 0);
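/*
 * Sketch of the active-low reset pulse performed in reset_qsfp() above
 * (the assert/deassert bit manipulation is partly elided in this
 * excerpt): drop RESET_N, hold briefly, raise it again, then wait out
 * module initialization before re-enabling the IntN interrupt. The
 * delay is illustrative.
 */
#if 0
static void pulse_reset_n(unsigned long long *out_reg,
			  unsigned long long reset_n_bit)
{
	*out_reg &= ~reset_n_bit;	/* assert reset (active low) */
	/* write the OUT CSR, delay a few microseconds */
	*out_reg |= reset_n_bit;	/* deassert reset */
	/* write the OUT CSR, then wait for module init (t_init) */
}
#endif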
9504 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9505 u8 *qsfp_interrupt_status)
9507 struct hfi1_devdata *dd = ppd->dd;
9509 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9510 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9511 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9514 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9515 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9516 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9520 * The remaining alarms/warnings don't matter if the link is down.
9522 if (ppd->host_link_state & HLS_DOWN)
9525 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9526 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9527 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9530 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9531 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9532 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9535 /* Byte 2 is vendor specific */
9537 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9538 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9539 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9542 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9543 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9544 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9547 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9548 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9549 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9552 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9553 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9554 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9557 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9558 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9559 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9562 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9563 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9564 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9567 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9568 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9569 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9572 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9573 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9574 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9577 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9578 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9579 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9582 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9583 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9584 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9587 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9588 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9589 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9592 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9593 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9594 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9597 /* Bytes 9-10 and 11-12 are reserved */
9598 /* Bytes 13-15 are vendor specific */
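/*
 * For reference, the status byte layout decoded above (offsets into
 * the 16-byte block read from the module; alarms and warnings share a
 * byte per quantity):
 *
 *	byte 0:      temperature
 *	byte 1:      supply voltage (Vcc)
 *	byte 2:      vendor specific
 *	bytes 3-4:   RX power, channels 1/2 and 3/4
 *	bytes 5-6:   TX bias, channels 1/2 and 3/4
 *	bytes 7-8:   TX power, channels 1/2 and 3/4
 *	bytes 9-12:  reserved
 *	bytes 13-15: vendor specific
 */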
9603 /* This routine will only be scheduled if the QSFP module present signal is asserted */
9604 void qsfp_event(struct work_struct *work)
9606 struct qsfp_data *qd;
9607 struct hfi1_pportdata *ppd;
9608 struct hfi1_devdata *dd;
9610 qd = container_of(work, struct qsfp_data, qsfp_work);
9615 if (!qsfp_mod_present(ppd))
9618 if (ppd->host_link_state == HLS_DN_DISABLE) {
9619 dd_dev_info(ppd->dd,
9620 "%s: stopping link start because link is disabled\n",
9626 * Turn DC back on after cable has been re-inserted. Up until
9627 * now, the DC has been in reset to save power.
9631 if (qd->cache_refresh_required) {
9632 set_qsfp_int_n(ppd, 0);
9634 wait_for_qsfp_init(ppd);
9637 * Allow INT_N to trigger the QSFP interrupt to watch
9638 * for alarms and warnings
9640 set_qsfp_int_n(ppd, 1);
9645 if (qd->check_interrupt_flags) {
9646 u8 qsfp_interrupt_status[16] = {0,};
9648 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9649 &qsfp_interrupt_status[0], 16) != 16) {
9651 "%s: Failed to read status of QSFP module\n",
9654 unsigned long flags;
9656 handle_qsfp_error_conditions(
9657 ppd, qsfp_interrupt_status);
9658 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9659 ppd->qsfp_info.check_interrupt_flags = 0;
9660 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9666 static void init_qsfp_int(struct hfi1_devdata *dd)
9668 struct hfi1_pportdata *ppd = dd->pport;
9669 u64 qsfp_mask, cce_int_mask;
9670 const int qsfp1_int_smask = QSFP1_INT % 64;
9671 const int qsfp2_int_smask = QSFP2_INT % 64;
9674 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9675 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9676 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9677 * the index of the appropriate CSR in the CCEIntMask CSR array
9679 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9680 (8 * (QSFP1_INT / 64)));
9682 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9683 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9686 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9687 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9691 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9692 /* Clear current status to avoid spurious interrupts */
9693 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9695 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9698 set_qsfp_int_n(ppd, 0);
9700 /* Handle active low nature of INT_N and MODPRST_N pins */
9701 if (qsfp_mod_present(ppd))
9702 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9704 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9709 * Do a one-time initialize of the LCB block.
9711 static void init_lcb(struct hfi1_devdata *dd)
9713 /* simulator does not correctly handle LCB cclk loopback, skip */
9714 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9717 /* the DC has been reset earlier in the driver load */
9719 /* set LCB for cclk loopback on the port */
9720 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9721 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9722 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9723 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9724 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9725 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9726 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9730 * Perform a test read on the QSFP. Return 0 on success, -ERRNO on failure.
9733 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9739 * Report success if not a QSFP or, if it is a QSFP, but the cable is unplugged.
9742 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9745 /* read byte 2, the status byte */
9746 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9752 return 0; /* success */
9756 * Values for QSFP retry.
9758 * Give up after 10s (20 x 500ms). The overall timeout was determined
9759 * empirically, from experience on a large cluster.
9761 #define MAX_QSFP_RETRIES 20
9762 #define QSFP_RETRY_WAIT 500 /* msec */
9765 * Try a QSFP read. If it fails, schedule a retry for later.
9766 * Called on first link activation after driver load.
9768 static void try_start_link(struct hfi1_pportdata *ppd)
9770 if (test_qsfp_read(ppd)) {
9772 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9773 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9776 dd_dev_info(ppd->dd,
9777 "QSFP not responding, waiting and retrying %d\n",
9778 (int)ppd->qsfp_retry_count);
9779 ppd->qsfp_retry_count++;
9780 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9781 msecs_to_jiffies(QSFP_RETRY_WAIT));
9784 ppd->qsfp_retry_count = 0;
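/*
 * try_start_link() above is a bounded retry built on a delayed
 * workqueue item. A minimal sketch of the same shape, with
 * hypothetical names (my_dev, my_probe, my_start are not driver
 * symbols):
 *
 *	static void my_try_start(struct my_dev *dev)
 *	{
 *		if (my_probe(dev) != 0) {
 *			if (dev->tries++ >= MY_MAX_TRIES)
 *				return;		// give up for good
 *			queue_delayed_work(dev->wq, &dev->retry_work,
 *					   msecs_to_jiffies(MY_WAIT_MS));
 *			return;			// retry later
 *		}
 *		dev->tries = 0;			// success; reset the counter
 *		my_start(dev);
 *	}
 */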
9790 * Workqueue function to start the link after a delay.
9792 void handle_start_link(struct work_struct *work)
9794 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9795 start_link_work.work);
9796 try_start_link(ppd);
9799 int bringup_serdes(struct hfi1_pportdata *ppd)
9801 struct hfi1_devdata *dd = ppd->dd;
9805 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9806 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9808 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9811 guid = dd->base_guid + ppd->port - 1;
9812 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9815 /* Set linkinit_reason on power up per OPA spec */
9816 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9818 /* one-time init of the LCB */
9822 ret = init_loopback(dd);
9828 if (ppd->port_type == PORT_TYPE_QSFP) {
9829 set_qsfp_int_n(ppd, 0);
9830 wait_for_qsfp_init(ppd);
9831 set_qsfp_int_n(ppd, 1);
9834 try_start_link(ppd);
9838 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9840 struct hfi1_devdata *dd = ppd->dd;
9843 * Shut down the link and keep it down. First clear the flag that says the
9844 * driver wants to allow the link to be up (driver_link_ready).
9845 * Then make sure the link is not automatically restarted
9846 * (link_enabled). Cancel any pending restart. And finally go offline.
9849 ppd->driver_link_ready = 0;
9850 ppd->link_enabled = 0;
9852 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9853 flush_delayed_work(&ppd->start_link_work);
9854 cancel_delayed_work_sync(&ppd->start_link_work);
9856 ppd->offline_disabled_reason =
9857 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9858 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9859 OPA_LINKDOWN_REASON_REBOOT);
9860 set_link_state(ppd, HLS_DN_OFFLINE);
9862 /* disable the port */
9863 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9864 cancel_work_sync(&ppd->freeze_work);
9867 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9869 struct hfi1_pportdata *ppd;
9872 ppd = (struct hfi1_pportdata *)(dd + 1);
9873 for (i = 0; i < dd->num_pports; i++, ppd++) {
9874 ppd->ibport_data.rvp.rc_acks = NULL;
9875 ppd->ibport_data.rvp.rc_qacks = NULL;
9876 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9877 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9878 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9879 if (!ppd->ibport_data.rvp.rc_acks ||
9880 !ppd->ibport_data.rvp.rc_delayed_comp ||
9881 !ppd->ibport_data.rvp.rc_qacks)
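/*
 * The counters above use the kernel per-CPU allocator. A short sketch
 * of the alloc/update/free life cycle (illustrative only):
 *
 *	u64 __percpu *cnt = alloc_percpu(u64);
 *
 *	if (!cnt)
 *		return -ENOMEM;
 *	this_cpu_inc(*cnt);	// lock-free increment on the local CPU
 *	...			// to total: sum *per_cpu_ptr(cnt, cpu)
 *				// over for_each_possible_cpu(cpu)
 *	free_percpu(cnt);
 */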
9889 * index is the index into the receive array
9891 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9892 u32 type, unsigned long pa, u16 order)
9896 if (!(dd->flags & HFI1_PRESENT))
9899 if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9902 } else if (type > PT_INVALID) {
9904 "unexpected receive array type %u for index %u, not handled\n",
9908 trace_hfi1_put_tid(dd, index, type, pa, order);
9910 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9911 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9912 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9913 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9914 << RCV_ARRAY_RT_ADDR_SHIFT;
9915 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9916 writeq(reg, dd->rcvarray_wc + (index * 8));
9918 if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9920 * Eager entries are written and flushed
9922 * Expected entries are flushed every 4 writes
flush_wc();
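/*
 * Worked example of the encoding above, with assumed values: a 4 KiB
 * buffer (order 0) at physical address 0x12345000 packs to
 * write-enable | (0 << BUF_SIZE_SHIFT) | (0x12345 << ADDR_SHIFT),
 * since pa >> RT_ADDR_SHIFT = 0x12345000 >> 12 = 0x12345. Only bits
 * above the 4KB boundary survive, so callers must supply 4KB-aligned
 * addresses.
 */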
9929 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9931 struct hfi1_devdata *dd = rcd->dd;
9934 /* this could be optimized */
9935 for (i = rcd->eager_base; i < rcd->eager_base +
9936 rcd->egrbufs.alloced; i++)
9937 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9939 for (i = rcd->expected_base;
9940 i < rcd->expected_base + rcd->expected_count; i++)
9941 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9944 static const char * const ib_cfg_name_strings[] = {
9945 "HFI1_IB_CFG_LIDLMC",
9946 "HFI1_IB_CFG_LWID_DG_ENB",
9947 "HFI1_IB_CFG_LWID_ENB",
9949 "HFI1_IB_CFG_SPD_ENB",
9951 "HFI1_IB_CFG_RXPOL_ENB",
9952 "HFI1_IB_CFG_LREV_ENB",
9953 "HFI1_IB_CFG_LINKLATENCY",
9954 "HFI1_IB_CFG_HRTBT",
9955 "HFI1_IB_CFG_OP_VLS",
9956 "HFI1_IB_CFG_VL_HIGH_CAP",
9957 "HFI1_IB_CFG_VL_LOW_CAP",
9958 "HFI1_IB_CFG_OVERRUN_THRESH",
9959 "HFI1_IB_CFG_PHYERR_THRESH",
9960 "HFI1_IB_CFG_LINKDEFAULT",
9961 "HFI1_IB_CFG_PKEYS",
9963 "HFI1_IB_CFG_LSTATE",
9964 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9965 "HFI1_IB_CFG_PMA_TICKS",
9969 static const char *ib_cfg_name(int which)
9971 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9973 return ib_cfg_name_strings[which];
9976 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9978 struct hfi1_devdata *dd = ppd->dd;
9982 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9983 val = ppd->link_width_enabled;
9985 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9986 val = ppd->link_width_active;
9988 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9989 val = ppd->link_speed_enabled;
9991 case HFI1_IB_CFG_SPD: /* current Link speed */
9992 val = ppd->link_speed_active;
9995 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9996 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9997 case HFI1_IB_CFG_LINKLATENCY:
10000 case HFI1_IB_CFG_OP_VLS:
10001 val = ppd->actual_vls_operational;
10003 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
10004 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
10006 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
10007 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
10009 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10010 val = ppd->overrun_threshold;
10012 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10013 val = ppd->phy_error_threshold;
10015 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10019 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10020 case HFI1_IB_CFG_PMA_TICKS:
10023 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10026 "%s: which %s: not implemented\n",
10028 ib_cfg_name(which));
10036 * The largest MAD packet size.
10038 #define MAX_MAD_PACKET 2048
10041 * Return the maximum header bytes that can go on the _wire_
10042 * for this device. This count includes the ICRC which is
10043 * not part of the packet held in memory but it is appended by the HW.
10045 * This is dependent on the device's receive header entry size.
10046 * HFI allows this to be set per-receive context, but the
10047 * driver presently enforces a global value.
10049 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10052 * The maximum non-payload (MTU) bytes in LRH.PktLen are
10053 * the Receive Header Entry Size minus the PBC (or RHF) size
10054 * plus one DW for the ICRC appended by HW.
10056 * dd->rcd[0].rcvhdrqentsize is in DW.
10057 * We use rcd[0] as all contexts will have the same value. Also,
10058 * the first kernel context would have been allocated by now so
10059 * we are guaranteed a valid value.
10061 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
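/*
 * Worked example, with an assumed rcvhdrqentsize of 32 DW:
 * (32 - 2 + 1) << 2 = 124 bytes on the wire: 30 DW of header storage
 * after dropping the 2-DW PBC/RHF, plus 1 DW of ICRC, times 4 bytes
 * per DW.
 */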
10066 * @ppd: per port data
10068 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
10069 * registers compare against LRH.PktLen, so use the max bytes included in the LRH.
10072 * This routine changes all VL values except VL15, which it maintains at the same value.
10075 static void set_send_length(struct hfi1_pportdata *ppd)
10077 struct hfi1_devdata *dd = ppd->dd;
10078 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10079 u32 maxvlmtu = dd->vld[15].mtu;
10080 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10081 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10082 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10086 for (i = 0; i < ppd->vls_supported; i++) {
10087 if (dd->vld[i].mtu > maxvlmtu)
10088 maxvlmtu = dd->vld[i].mtu;
10090 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10091 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10092 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10094 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10095 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10096 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10098 write_csr(dd, SEND_LEN_CHECK0, len1);
10099 write_csr(dd, SEND_LEN_CHECK1, len2);
10100 /* adjust kernel credit return thresholds based on new MTUs */
10101 /* all kernel receive contexts have the same hdrqentsize */
10102 for (i = 0; i < ppd->vls_supported; i++) {
10103 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10104 sc_mtu_to_threshold(dd->vld[i].sc,
10106 dd->rcd[0]->rcvhdrqentsize));
10107 for (j = 0; j < INIT_SC_PER_VL; j++)
10108 sc_set_cr_threshold(
10109 pio_select_send_context_vl(dd, j, i),
10112 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10113 sc_mtu_to_threshold(dd->vld[15].sc,
10115 dd->rcd[0]->rcvhdrqentsize));
10116 sc_set_cr_threshold(dd->vld[15].sc, thres);
10118 /* Adjust maximum MTU for the port in DC */
10119 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10120 (ilog2(maxvlmtu >> 8) + 1);
10121 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10122 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10123 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10124 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10125 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
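/*
 * Worked example of the MTU-cap encoding above: 10240 uses the
 * dedicated DCC_CFG_PORT_MTU_CAP_10240 code; other power-of-two MTUs
 * encode as ilog2(mtu >> 8) + 1, e.g. 8192 >> 8 = 32, ilog2(32) = 5,
 * so dcmtu = 6, while 2048 gives ilog2(8) + 1 = 4.
 */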
10128 static void set_lidlmc(struct hfi1_pportdata *ppd)
10132 struct hfi1_devdata *dd = ppd->dd;
10133 u32 mask = ~((1U << ppd->lmc) - 1);
10134 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10138 * Program 0 in CSR if port lid is extended. This prevents
10139 * 9B packets from being sent out for large LIDs.
10141 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10142 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10143 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10144 c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10145 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10146 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10147 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10148 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10151 * Iterate over all the send contexts and set their SLID check
10153 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10154 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10155 (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10156 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10158 for (i = 0; i < chip_send_contexts(dd); i++) {
10159 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10161 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10164 /* Now we have to do the same thing for the sdma engines */
10165 sdma_update_lmc(dd, mask, lid);
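/*
 * Worked example of the LMC masking above: with lmc = 2 the mask is
 * ~((1 << 2) - 1) = ~0x3, so a port with base LID 0x10 answers for
 * DLIDs 0x10-0x13, and (lid & mask) strips the low LMC bits before the
 * SLID value is programmed into each send context.
 */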
10168 static const char *state_completed_string(u32 completed)
10170 static const char * const state_completed[] = {
10176 if (completed < ARRAY_SIZE(state_completed))
10177 return state_completed[completed];
10182 static const char all_lanes_dead_timeout_expired[] =
10183 "All lanes were inactive – was the interconnect media removed?";
10184 static const char tx_out_of_policy[] =
10185 "Passing lanes on local port do not meet the local link width policy";
10186 static const char no_state_complete[] =
10187 "State timeout occurred before link partner completed the state";
10188 static const char * const state_complete_reasons[] = {
10189 [0x00] = "Reason unknown",
10190 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10191 [0x02] = "Link partner reported failure",
10192 [0x10] = "Unable to achieve frame sync on any lane",
10194 "Unable to find a common bit rate with the link partner",
10196 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10198 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10199 [0x14] = no_state_complete,
10201 "State timeout occurred before link partner identified equalization presets",
10203 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10204 [0x17] = tx_out_of_policy,
10205 [0x20] = all_lanes_dead_timeout_expired,
10207 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10208 [0x22] = no_state_complete,
10210 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10211 [0x24] = tx_out_of_policy,
10212 [0x30] = all_lanes_dead_timeout_expired,
10214 "State timeout occurred waiting for host to process received frames",
10215 [0x32] = no_state_complete,
10217 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10218 [0x34] = tx_out_of_policy,
10219 [0x35] = "Negotiated link width is mutually exclusive",
10221 "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10222 [0x37] = "Unable to resolve secure data exchange",
10225 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10228 const char *str = NULL;
10230 if (code < ARRAY_SIZE(state_complete_reasons))
10231 str = state_complete_reasons[code];
10238 /* describe the given last state complete frame */
10239 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10240 const char *prefix)
10242 struct hfi1_devdata *dd = ppd->dd;
10250 * [ 0: 0] - success
* [ 3: 1] - state
10252 * [ 7: 4] - next state timeout
10253 * [15: 8] - reason code
* [31:16] - lanes
10256 success = frame & 0x1;
10257 state = (frame >> 1) & 0x7;
10258 reason = (frame >> 8) & 0xff;
10259 lanes = (frame >> 16) & 0xffff;
10261 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10263 dd_dev_err(dd, " last reported state: %s (0x%x)\n",
10264 state_completed_string(state), state);
10265 dd_dev_err(dd, " state successfully completed: %s\n",
10266 success ? "yes" : "no");
10267 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10268 reason, state_complete_reason_code_string(ppd, reason));
10269 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
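/*
 * Worked decode of a hypothetical frame 0x000f0c23 using the shifts
 * above: success = 0x23 & 1 = 1, state = (0x23 >> 1) & 0x7 = 0x1,
 * reason = (frame >> 8) & 0xff = 0x0c, lanes = (frame >> 16) & 0xffff
 * = 0x000f, i.e. lanes 0-3 passed.
 */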
10273 * Read the last state complete frames and explain them. This routine
10274 * expects to be called if the link went down during link negotiation
10275 * and initialization (LNI). That is, anywhere between polling and link up.
10277 static void check_lni_states(struct hfi1_pportdata *ppd)
10279 u32 last_local_state;
10280 u32 last_remote_state;
10282 read_last_local_state(ppd->dd, &last_local_state);
10283 read_last_remote_state(ppd->dd, &last_remote_state);
10286 * Don't report anything if there is nothing to report. A value of
10287 * 0 means the link was taken down while polling and there was no
10288 * training in progress.
10290 if (last_local_state == 0 && last_remote_state == 0)
10293 decode_state_complete(ppd, last_local_state, "transmitted");
10294 decode_state_complete(ppd, last_remote_state, "received");
10297 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10298 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10301 unsigned long timeout;
10303 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10304 timeout = jiffies + msecs_to_jiffies(wait_ms);
10306 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10309 if (time_after(jiffies, timeout)) {
10311 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10319 /* called when the logical link state is not down as it should be */
10320 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10322 struct hfi1_devdata *dd = ppd->dd;
10325 * Bring link up in LCB loopback
10327 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10328 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10329 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10331 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10332 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10333 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10334 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10336 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10337 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10339 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10340 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10342 wait_link_transfer_active(dd, 100);
10345 * Bring the link down again.
10347 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10348 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10349 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10351 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10355 * Helper for set_link_state(). Do not call except from that routine.
10356 * Expects ppd->hls_lock to be held.
10358 * @rem_reason value to be sent to the neighbor
10360 * LinkDownReasons only set if transition succeeds.
10362 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10364 struct hfi1_devdata *dd = ppd->dd;
10365 u32 previous_state;
10366 int offline_state_ret;
10369 update_lcb_cache(dd);
10371 previous_state = ppd->host_link_state;
10372 ppd->host_link_state = HLS_GOING_OFFLINE;
10374 /* start offline transition */
10375 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10377 if (ret != HCMD_SUCCESS) {
10379 "Failed to transition to Offline link state, return %d\n",
10383 if (ppd->offline_disabled_reason ==
10384 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10385 ppd->offline_disabled_reason =
10386 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10388 offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10389 if (offline_state_ret < 0)
10390 return offline_state_ret;
10392 /* Disable AOC transmitters */
10393 if (ppd->port_type == PORT_TYPE_QSFP &&
10394 ppd->qsfp_info.limiting_active &&
10395 qsfp_mod_present(ppd)) {
10398 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10400 set_qsfp_tx(ppd, 0);
10401 release_chip_resource(dd, qsfp_resource(dd));
10403 /* not fatal, but should warn */
10405 "Unable to acquire lock to turn off QSFP TX\n");
10410 * Wait for the offline.Quiet transition if it hasn't happened yet. It
10411 * can take a while for the link to go down.
10413 if (offline_state_ret != PLS_OFFLINE_QUIET) {
10414 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10420 * Now in charge of LCB - must be after the physical state is
10421 * offline.quiet and before host_link_state is changed.
10423 set_host_lcb_access(dd);
10424 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10426 /* make sure the logical state is also down */
10427 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10429 force_logical_link_state_down(ppd);
10431 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10432 update_statusp(ppd, IB_PORT_DOWN);
10435 * The LNI has a mandatory wait time after the physical state
10436 * moves to Offline.Quiet. The wait time may be different
10437 * depending on how the link went down. The 8051 firmware
10438 * will observe the needed wait time and only move to ready
10439 * when that is completed. The largest of the quiet timeouts
10440 * is 6s, so wait that long and then at least 0.5s more for
10441 * other transitions, and another 0.5s for a buffer.
10443 ret = wait_fm_ready(dd, 7000);
10446 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10447 /* state is really offline, so make it so */
10448 ppd->host_link_state = HLS_DN_OFFLINE;
10453 * The state is now offline and the 8051 is ready to accept host requests:
10455 * - change our state
10456 * - notify others if we were previously in a linkup state
10458 ppd->host_link_state = HLS_DN_OFFLINE;
10459 if (previous_state & HLS_UP) {
10460 /* went down while link was up */
10461 handle_linkup_change(dd, 0);
10462 } else if (previous_state
10463 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10464 /* went down while attempting link up */
10465 check_lni_states(ppd);
10467 /* The QSFP doesn't need to be reset on LNI failure */
10468 ppd->qsfp_info.reset_needed = 0;
10471 /* the active link width (downgrade) is 0 on link down */
10472 ppd->link_width_active = 0;
10473 ppd->link_width_downgrade_tx_active = 0;
10474 ppd->link_width_downgrade_rx_active = 0;
10475 ppd->current_egress_rate = 0;
10479 /* return the link state name */
10480 static const char *link_state_name(u32 state)
10483 int n = ilog2(state);
10484 static const char * const names[] = {
10485 [__HLS_UP_INIT_BP] = "INIT",
10486 [__HLS_UP_ARMED_BP] = "ARMED",
10487 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10488 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10489 [__HLS_DN_POLL_BP] = "POLL",
10490 [__HLS_DN_DISABLE_BP] = "DISABLE",
10491 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10492 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10493 [__HLS_GOING_UP_BP] = "GOING_UP",
10494 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10495 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10498 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10499 return name ? name : "unknown";
10502 /* return the link state reason name */
10503 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10505 if (state == HLS_UP_INIT) {
10506 switch (ppd->linkinit_reason) {
10507 case OPA_LINKINIT_REASON_LINKUP:
10509 case OPA_LINKINIT_REASON_FLAPPING:
10510 return "(FLAPPING)";
10511 case OPA_LINKINIT_OUTSIDE_POLICY:
10512 return "(OUTSIDE_POLICY)";
10513 case OPA_LINKINIT_QUARANTINED:
10514 return "(QUARANTINED)";
10515 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10516 return "(INSUFIC_CAPABILITY)";
10525 * driver_pstate - convert the driver's notion of a port's
10526 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10527 * Return -1 (converted to a u32) to indicate error.
10529 u32 driver_pstate(struct hfi1_pportdata *ppd)
10531 switch (ppd->host_link_state) {
10534 case HLS_UP_ACTIVE:
10535 return IB_PORTPHYSSTATE_LINKUP;
10537 return IB_PORTPHYSSTATE_POLLING;
10538 case HLS_DN_DISABLE:
10539 return IB_PORTPHYSSTATE_DISABLED;
10540 case HLS_DN_OFFLINE:
10541 return OPA_PORTPHYSSTATE_OFFLINE;
10542 case HLS_VERIFY_CAP:
10543 return IB_PORTPHYSSTATE_TRAINING;
10545 return IB_PORTPHYSSTATE_TRAINING;
10546 case HLS_GOING_OFFLINE:
10547 return OPA_PORTPHYSSTATE_OFFLINE;
10548 case HLS_LINK_COOLDOWN:
10549 return OPA_PORTPHYSSTATE_OFFLINE;
10550 case HLS_DN_DOWNDEF:
10552 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10553 ppd->host_link_state);
10559 * driver_lstate - convert the driver's notion of a port's
10560 * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
10561 * (converted to a u32) to indicate error.
10563 u32 driver_lstate(struct hfi1_pportdata *ppd)
10565 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10566 return IB_PORT_DOWN;
10568 switch (ppd->host_link_state & HLS_UP) {
10570 return IB_PORT_INIT;
10572 return IB_PORT_ARMED;
10573 case HLS_UP_ACTIVE:
10574 return IB_PORT_ACTIVE;
10576 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10577 ppd->host_link_state);
10582 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10583 u8 neigh_reason, u8 rem_reason)
10585 if (ppd->local_link_down_reason.latest == 0 &&
10586 ppd->neigh_link_down_reason.latest == 0) {
10587 ppd->local_link_down_reason.latest = lcl_reason;
10588 ppd->neigh_link_down_reason.latest = neigh_reason;
10589 ppd->remote_link_down_reason = rem_reason;
10594 * data_vls_operational() - Verify if data VL BCT credits and MTU are both set
10596 * @ppd: pointer to hfi1_pportdata structure
10598 * Return: true - OK, false - otherwise.
10600 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10605 if (!ppd->actual_vls_operational)
10608 for (i = 0; i < ppd->vls_supported; i++) {
10609 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10610 if ((reg && !ppd->dd->vld[i].mtu) ||
10611 (!reg && ppd->dd->vld[i].mtu))
10619 * Change the physical and/or logical link state.
10621 * Do not call this routine while inside an interrupt. It contains
10622 * calls to routines that can take multiple seconds to finish.
10624 * Returns 0 on success, -errno on failure.
10626 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10628 struct hfi1_devdata *dd = ppd->dd;
10629 struct ib_event event = {.device = NULL};
10631 int orig_new_state, poll_bounce;
10633 mutex_lock(&ppd->hls_lock);
10635 orig_new_state = state;
10636 if (state == HLS_DN_DOWNDEF)
10637 state = HLS_DEFAULT;
10639 /* interpret poll -> poll as a link bounce */
10640 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10641 state == HLS_DN_POLL;
10643 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10644 link_state_name(ppd->host_link_state),
10645 link_state_name(orig_new_state),
10646 poll_bounce ? "(bounce) " : "",
10647 link_state_reason_name(ppd, state));
10650 * If we're going to a (HLS_*) link state that implies the logical
10651 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10652 * reset is_sm_config_started to 0.
10654 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10655 ppd->is_sm_config_started = 0;
10658 * Do nothing if the states match. Let a poll to poll link bounce go through.
10661 if (ppd->host_link_state == state && !poll_bounce)
10666 if (ppd->host_link_state == HLS_DN_POLL &&
10667 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10669 * Quick link up jumps from polling to here.
10671 * Whether in normal or loopback mode, the
10672 * simulator jumps from polling to link up.
10673 * Accept that here.
10676 } else if (ppd->host_link_state != HLS_GOING_UP) {
10681 * Wait for Link_Up physical state.
10682 * Physical and Logical states should already be
10683 * transitioned to LinkUp and LinkInit, respectively.
10685 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10688 "%s: physical state did not change to LINK-UP\n",
10693 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10696 "%s: logical state did not change to INIT\n",
10701 /* clear old transient LINKINIT_REASON code */
10702 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10703 ppd->linkinit_reason =
10704 OPA_LINKINIT_REASON_LINKUP;
10706 /* enable the port */
10707 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10709 handle_linkup_change(dd, 1);
10710 pio_kernel_linkup(dd);
10713 * After link up, a new link width will have been set.
10714 * Update the xmit counters with regard to the new link width.
10717 update_xmit_counters(ppd, ppd->link_width_active);
10719 ppd->host_link_state = HLS_UP_INIT;
10720 update_statusp(ppd, IB_PORT_INIT);
10723 if (ppd->host_link_state != HLS_UP_INIT)
10726 if (!data_vls_operational(ppd)) {
10728 "%s: Invalid data VL credits or mtu\n",
10734 set_logical_state(dd, LSTATE_ARMED);
10735 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10738 "%s: logical state did not change to ARMED\n",
10742 ppd->host_link_state = HLS_UP_ARMED;
10743 update_statusp(ppd, IB_PORT_ARMED);
10745 * The simulator does not currently implement SMA messages,
10746 * so neighbor_normal is not set. Set it here when we first move to Armed.
10749 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10750 ppd->neighbor_normal = 1;
10752 case HLS_UP_ACTIVE:
10753 if (ppd->host_link_state != HLS_UP_ARMED)
10756 set_logical_state(dd, LSTATE_ACTIVE);
10757 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10760 "%s: logical state did not change to ACTIVE\n",
10763 /* tell all engines to go running */
10764 sdma_all_running(dd);
10765 ppd->host_link_state = HLS_UP_ACTIVE;
10766 update_statusp(ppd, IB_PORT_ACTIVE);
10768 /* Signal the IB layer that the port has gone active */
10769 event.device = &dd->verbs_dev.rdi.ibdev;
10770 event.element.port_num = ppd->port;
10771 event.event = IB_EVENT_PORT_ACTIVE;
10775 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10776 ppd->host_link_state == HLS_DN_OFFLINE) &&
10779 /* Hand LED control to the DC */
10780 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10782 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10783 u8 tmp = ppd->link_enabled;
10785 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10787 ppd->link_enabled = tmp;
10790 ppd->remote_link_down_reason = 0;
10792 if (ppd->driver_link_ready)
10793 ppd->link_enabled = 1;
10796 set_all_slowpath(ppd->dd);
10797 ret = set_local_link_attributes(ppd);
10801 ppd->port_error_action = 0;
10803 if (quick_linkup) {
10804 /* quick linkup does not go into polling */
10805 ret = do_quick_linkup(dd);
10807 ret1 = set_physical_link_state(dd, PLS_POLLING);
10809 ret1 = wait_phys_link_out_of_offline(ppd,
10811 if (ret1 != HCMD_SUCCESS) {
10813 "Failed to transition to Polling link state, return 0x%x\n",
10820 * Change the host link state after requesting DC8051 to
10821 * change its physical state so that we can ignore any
10822 * interrupt with stale LNI(XX) error, which will not be
10823 * cleared until DC8051 transitions to Polling state.
10825 ppd->host_link_state = HLS_DN_POLL;
10826 ppd->offline_disabled_reason =
10827 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10829 * If an error occurred above, go back to offline. The
10830 * caller may reschedule another attempt.
10833 goto_offline(ppd, 0);
10835 log_physical_state(ppd, PLS_POLLING);
10837 case HLS_DN_DISABLE:
10838 /* link is disabled */
10839 ppd->link_enabled = 0;
10841 /* allow any state to transition to disabled */
10843 /* must transition to offline first */
10844 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10845 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10848 ppd->remote_link_down_reason = 0;
10851 if (!dd->dc_shutdown) {
10852 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10853 if (ret1 != HCMD_SUCCESS) {
10855 "Failed to transition to Disabled link state, return 0x%x\n",
10860 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10863 "%s: physical state did not change to DISABLED\n",
10869 ppd->host_link_state = HLS_DN_DISABLE;
10871 case HLS_DN_OFFLINE:
10872 if (ppd->host_link_state == HLS_DN_DISABLE)
10875 /* allow any state to transition to offline */
10876 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10878 ppd->remote_link_down_reason = 0;
10880 case HLS_VERIFY_CAP:
10881 if (ppd->host_link_state != HLS_DN_POLL)
10883 ppd->host_link_state = HLS_VERIFY_CAP;
10884 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10887 if (ppd->host_link_state != HLS_VERIFY_CAP)
10890 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10891 if (ret1 != HCMD_SUCCESS) {
10893 "Failed to transition to link up state, return 0x%x\n",
10898 ppd->host_link_state = HLS_GOING_UP;
10901 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10902 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10904 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10913 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10914 __func__, link_state_name(ppd->host_link_state),
10915 link_state_name(state));
10919 mutex_unlock(&ppd->hls_lock);
10922 ib_dispatch_event(&event);
10927 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10933 case HFI1_IB_CFG_LIDLMC:
10936 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10938 * The VL Arbitrator high limit is sent in units of 4k
10939 * bytes, while HFI stores it in units of 64 bytes.
10942 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10943 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10944 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10946 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10947 /* HFI only supports POLL as the default link down state */
10948 if (val != HLS_DN_POLL)
10951 case HFI1_IB_CFG_OP_VLS:
10952 if (ppd->vls_operational != val) {
10953 ppd->vls_operational = val;
10959 * For link width, link width downgrade, and speed enable, always AND
10960 * the setting with what is actually supported. This has two benefits.
10961 * First, enabled can't have unsupported values, no matter what the
10962 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10963 * "fill in with your supported value" have all the bits in the
10964 * field set, so simply ANDing with supported has the desired result.
10966 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10967 ppd->link_width_enabled = val & ppd->link_width_supported;
10969 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10970 ppd->link_width_downgrade_enabled =
10971 val & ppd->link_width_downgrade_supported;
10973 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10974 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10976 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10978 * HFI does not follow IB specs, save this value
10979 * so we can report it, if asked.
10981 ppd->overrun_threshold = val;
10983 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10985 * HFI does not follow IB specs, save this value
10986 * so we can report it, if asked.
10988 ppd->phy_error_threshold = val;
10991 case HFI1_IB_CFG_MTU:
10992 set_send_length(ppd);
10995 case HFI1_IB_CFG_PKEYS:
10996 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10997 set_partition_keys(ppd);
11001 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
11002 dd_dev_info(ppd->dd,
11003 "%s: which %s, val 0x%x: not implemented\n",
11004 __func__, ib_cfg_name(which), val);
11010 /* begin functions related to vl arbitration table caching */
11011 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
11015 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11016 VL_ARB_LOW_PRIO_TABLE_SIZE);
11017 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11018 VL_ARB_HIGH_PRIO_TABLE_SIZE);
11021 * Note that we always return values directly from the
11022 * 'vl_arb_cache' (and do no CSR reads) in response to a
11023 * 'Get(VLArbTable)'. This is obviously correct after a
11024 * 'Set(VLArbTable)', since the cache will then be up to
11025 * date. But it's also correct prior to any 'Set(VLArbTable)'
11026 * since then both the cache, and the relevant h/w registers will be 0.
11030 for (i = 0; i < MAX_PRIO_TABLE; i++)
11031 spin_lock_init(&ppd->vl_arb_cache[i].lock);
11035 * vl_arb_lock_cache
11037 * All other vl_arb_* functions should be called only after locking the cache.
11040 static inline struct vl_arb_cache *
11041 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11043 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11045 spin_lock(&ppd->vl_arb_cache[idx].lock);
11046 return &ppd->vl_arb_cache[idx];
11049 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11051 spin_unlock(&ppd->vl_arb_cache[idx].lock);
11054 static void vl_arb_get_cache(struct vl_arb_cache *cache,
11055 struct ib_vl_weight_elem *vl)
11057 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11060 static void vl_arb_set_cache(struct vl_arb_cache *cache,
11061 struct ib_vl_weight_elem *vl)
11063 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11066 static int vl_arb_match_cache(struct vl_arb_cache *cache,
11067 struct ib_vl_weight_elem *vl)
11069 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11072 /* end functions related to vl arbitration table caching */
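/*
 * Typical use of the cache helpers above, as in fm_get_table() and
 * fm_set_table() below (sketch):
 *
 *	struct vl_arb_cache *vlc;
 *
 *	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, t);	// copy out while holding the lock
 *	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
 */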
11074 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11075 u32 size, struct ib_vl_weight_elem *vl)
11077 struct hfi1_devdata *dd = ppd->dd;
11079 unsigned int i, is_up = 0;
11080 int drain, ret = 0;
11082 mutex_lock(&ppd->hls_lock);
11084 if (ppd->host_link_state & HLS_UP)
11087 drain = !is_ax(dd) && is_up;
11091 * Before adjusting VL arbitration weights, empty per-VL
11092 * FIFOs, otherwise a packet whose VL weight is being
11093 * set to 0 could get stuck in a FIFO with no chance to drain.
11096 ret = stop_drain_data_vls(dd);
11101 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11106 for (i = 0; i < size; i++, vl++) {
11108 * NOTE: The low priority shift and mask are used here, but
11109 * they are the same for both the low and high registers.
11111 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11112 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11113 | (((u64)vl->weight
11114 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11115 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11116 write_csr(dd, target + (i * 8), reg);
11118 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11121 open_fill_data_vls(dd); /* reopen all VLs */
11124 mutex_unlock(&ppd->hls_lock);
11130 * Read one credit merge VL register.
11132 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11133 struct vl_limit *vll)
11135 u64 reg = read_csr(dd, csr);
11137 vll->dedicated = cpu_to_be16(
11138 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11139 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11140 vll->shared = cpu_to_be16(
11141 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11142 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11146 * Read the current credit merge limits.
11148 static int get_buffer_control(struct hfi1_devdata *dd,
11149 struct buffer_control *bc, u16 *overall_limit)
11154 /* not all entries are filled in */
11155 memset(bc, 0, sizeof(*bc));
11157 /* OPA and HFI have a 1-1 mapping */
11158 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11159 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11161 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11162 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11164 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11165 bc->overall_shared_limit = cpu_to_be16(
11166 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11167 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11169 *overall_limit = (reg
11170 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11171 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11172 return sizeof(struct buffer_control);
11175 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11180 /* each register contains 16 SC->VLnt mappings, 4 bits each */
11181 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11182 for (i = 0; i < sizeof(u64); i++) {
11183 u8 byte = *(((u8 *)&reg) + i);
11185 dp->vlnt[2 * i] = byte & 0xf;
11186 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11189 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11190 for (i = 0; i < sizeof(u64); i++) {
11191 u8 byte = *(((u8 *)&reg) + i);
11193 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11194 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11196 return sizeof(struct sc2vlnt);
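/*
 * Worked example of the nibble unpacking above: if the low byte of
 * DCC_CFG_SC_VL_TABLE_15_0 reads 0x31, then SC0 -> VLnt 0x1
 * (byte & 0xf) and SC1 -> VLnt 0x3 ((byte & 0xf0) >> 4); each 64-bit
 * register thus holds 16 4-bit mappings.
 */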
11199 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11200 struct ib_vl_weight_elem *vl)
11204 for (i = 0; i < nelems; i++, vl++) {
11210 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11212 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11214 0, dp->vlnt[0] & 0xf,
11215 1, dp->vlnt[1] & 0xf,
11216 2, dp->vlnt[2] & 0xf,
11217 3, dp->vlnt[3] & 0xf,
11218 4, dp->vlnt[4] & 0xf,
11219 5, dp->vlnt[5] & 0xf,
11220 6, dp->vlnt[6] & 0xf,
11221 7, dp->vlnt[7] & 0xf,
11222 8, dp->vlnt[8] & 0xf,
11223 9, dp->vlnt[9] & 0xf,
11224 10, dp->vlnt[10] & 0xf,
11225 11, dp->vlnt[11] & 0xf,
11226 12, dp->vlnt[12] & 0xf,
11227 13, dp->vlnt[13] & 0xf,
11228 14, dp->vlnt[14] & 0xf,
11229 15, dp->vlnt[15] & 0xf));
11230 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11231 DC_SC_VL_VAL(31_16,
11232 16, dp->vlnt[16] & 0xf,
11233 17, dp->vlnt[17] & 0xf,
11234 18, dp->vlnt[18] & 0xf,
11235 19, dp->vlnt[19] & 0xf,
11236 20, dp->vlnt[20] & 0xf,
11237 21, dp->vlnt[21] & 0xf,
11238 22, dp->vlnt[22] & 0xf,
11239 23, dp->vlnt[23] & 0xf,
11240 24, dp->vlnt[24] & 0xf,
11241 25, dp->vlnt[25] & 0xf,
11242 26, dp->vlnt[26] & 0xf,
11243 27, dp->vlnt[27] & 0xf,
11244 28, dp->vlnt[28] & 0xf,
11245 29, dp->vlnt[29] & 0xf,
11246 30, dp->vlnt[30] & 0xf,
11247 31, dp->vlnt[31] & 0xf));
11250 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11254 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11255 what, (int)limit, idx);
11258 /* change only the shared limit portion of SendCmGlobalCredit */
11259 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11263 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11264 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11265 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11266 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11269 /* change only the total credit limit portion of SendCmGlobalCredit */
11270 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11274 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11275 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11276 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11277 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11280 /* set the given per-VL shared limit */
11281 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11286 if (vl < TXE_NUM_DATA_VL)
11287 addr = SEND_CM_CREDIT_VL + (8 * vl);
11289 addr = SEND_CM_CREDIT_VL15;
11291 reg = read_csr(dd, addr);
11292 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11293 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11294 write_csr(dd, addr, reg);
11297 /* set the given per-VL dedicated limit */
11298 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11303 if (vl < TXE_NUM_DATA_VL)
11304 addr = SEND_CM_CREDIT_VL + (8 * vl);
11306 addr = SEND_CM_CREDIT_VL15;
11308 reg = read_csr(dd, addr);
11309 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11310 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11311 write_csr(dd, addr, reg);
11314 /* spin until the given per-VL status mask bits clear */
11315 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11318 unsigned long timeout;
11321 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11323 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11326 return; /* success */
11327 if (time_after(jiffies, timeout))
11328 break; /* timed out */
11333 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11334 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11336 * If this occurs, it is likely there was a credit loss on the link.
11337 * The only recovery from that is a link bounce.
11340 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
11344 * The number of credits on the VLs may be changed while everything
11345 * is "live", but the following algorithm must be followed due to
11346 * how the hardware is actually implemented. In particular,
11347 * Return_Credit_Status[] is the only correct status check.
11349 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11350 * set Global_Shared_Credit_Limit = 0
11352 * mask0 = all VLs that are changing either dedicated or shared limits
11353 * set Shared_Limit[mask0] = 0
11354 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11355 * if (changing any dedicated limit)
11356 * mask1 = all VLs that are lowering dedicated limits
11357 * lower Dedicated_Limit[mask1]
11358 * spin until Return_Credit_Status[mask1] == 0
11359 * raise Dedicated_Limits
11360 * raise Shared_Limits
11361 * raise Global_Shared_Credit_Limit
11363 * lower = if the new limit is lower, set the limit to the new value
11364 * raise = if the new limit is higher than the current value (may be changed
11365 * earlier in the algorithm), set the new limit to the new value
11367 int set_buffer_control(struct hfi1_pportdata *ppd,
11368 struct buffer_control *new_bc)
11370 struct hfi1_devdata *dd = ppd->dd;
11371 u64 changing_mask, ld_mask, stat_mask;
11373 int i, use_all_mask;
11374 int this_shared_changing;
11375 int vl_count = 0, ret;
11377 * A0: add the variable any_shared_limit_changing below and in the
11378 * algorithm above. If removing A0 support, it can be removed.
11380 int any_shared_limit_changing;
11381 struct buffer_control cur_bc;
11382 u8 changing[OPA_MAX_VLS];
11383 u8 lowering_dedicated[OPA_MAX_VLS];
11386 const u64 all_mask =
11387 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11388 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11389 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11390 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11391 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11392 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11393 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11394 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11395 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11397 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11398 #define NUM_USABLE_VLS 16 /* look at VL15 and less */
11400 /* find the new total credits, do sanity check on unused VLs */
11401 for (i = 0; i < OPA_MAX_VLS; i++) {
11403 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11406 nonzero_msg(dd, i, "dedicated",
11407 be16_to_cpu(new_bc->vl[i].dedicated));
11408 nonzero_msg(dd, i, "shared",
11409 be16_to_cpu(new_bc->vl[i].shared));
11410 new_bc->vl[i].dedicated = 0;
11411 new_bc->vl[i].shared = 0;
11413 new_total += be16_to_cpu(new_bc->overall_shared_limit);
11415 /* fetch the current values */
11416 get_buffer_control(dd, &cur_bc, &cur_total);
11419 * Create the masks we will use.
11421 memset(changing, 0, sizeof(changing));
11422 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11424 * NOTE: Assumes that the individual VL bits are adjacent and in increasing order.
11428 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11432 any_shared_limit_changing = 0;
11433 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11436 this_shared_changing = new_bc->vl[i].shared
11437 != cur_bc.vl[i].shared;
11438 if (this_shared_changing)
11439 any_shared_limit_changing = 1;
11440 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11441 this_shared_changing) {
11443 changing_mask |= stat_mask;
11446 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11447 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11448 lowering_dedicated[i] = 1;
11449 ld_mask |= stat_mask;
11453 /* bracket the credit change with a total adjustment */
11454 if (new_total > cur_total)
11455 set_global_limit(dd, new_total);
11458 * Start the credit change algorithm.
11461 if ((be16_to_cpu(new_bc->overall_shared_limit) <
11462 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11463 (is_ax(dd) && any_shared_limit_changing)) {
11464 set_global_shared(dd, 0);
11465 cur_bc.overall_shared_limit = 0;
11469 for (i = 0; i < NUM_USABLE_VLS; i++) {
11474 set_vl_shared(dd, i, 0);
11475 cur_bc.vl[i].shared = 0;
11479 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11482 if (change_count > 0) {
11483 for (i = 0; i < NUM_USABLE_VLS; i++) {
11487 if (lowering_dedicated[i]) {
11488 set_vl_dedicated(dd, i,
11489 be16_to_cpu(new_bc->
11491 cur_bc.vl[i].dedicated =
11492 new_bc->vl[i].dedicated;
11496 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11498 /* now raise all dedicated that are going up */
11499 for (i = 0; i < NUM_USABLE_VLS; i++) {
11503 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11504 be16_to_cpu(cur_bc.vl[i].dedicated))
11505 set_vl_dedicated(dd, i,
11506 be16_to_cpu(new_bc->
11511 /* next raise all shared that are going up */
11512 for (i = 0; i < NUM_USABLE_VLS; i++) {
11516 if (be16_to_cpu(new_bc->vl[i].shared) >
11517 be16_to_cpu(cur_bc.vl[i].shared))
11518 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11521 /* finally raise the global shared */
11522 if (be16_to_cpu(new_bc->overall_shared_limit) >
11523 be16_to_cpu(cur_bc.overall_shared_limit))
11524 set_global_shared(dd,
11525 be16_to_cpu(new_bc->overall_shared_limit));
11527 /* bracket the credit change with a total adjustment */
11528 if (new_total < cur_total)
11529 set_global_limit(dd, new_total);
11532 * Determine the actual number of operational VLs using the number of
11533 * dedicated and shared credits for each VL.
11535 if (change_count > 0) {
11536 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11537 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11538 be16_to_cpu(new_bc->vl[i].shared) > 0)
11540 ppd->actual_vls_operational = vl_count;
11541 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11542 ppd->actual_vls_operational :
11543 ppd->vls_operational,
11546 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11547 ppd->actual_vls_operational :
11548 ppd->vls_operational, NULL);
11556 * Read the given fabric manager table. Return the size of the
11557 * table (in bytes) on success, and a negative error code on failure.
11560 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11564 struct vl_arb_cache *vlc;
11567 case FM_TBL_VL_HIGH_ARB:
11570 * OPA specifies 128 elements (of 2 bytes each), though
11571 * HFI supports only 16 elements in h/w.
11573 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11574 vl_arb_get_cache(vlc, t);
11575 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11577 case FM_TBL_VL_LOW_ARB:
11580 * OPA specifies 128 elements (of 2 bytes each), though
11581 * HFI supports only 16 elements in h/w.
11583 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11584 vl_arb_get_cache(vlc, t);
11585 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11587 case FM_TBL_BUFFER_CONTROL:
11588 size = get_buffer_control(ppd->dd, t, NULL);
11590 case FM_TBL_SC2VLNT:
11591 size = get_sc2vlnt(ppd->dd, t);
11593 case FM_TBL_VL_PREEMPT_ELEMS:
11595 /* OPA specifies 128 elements, of 2 bytes each */
11596 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11598 case FM_TBL_VL_PREEMPT_MATRIX:
11601 * OPA specifies that this is the same size as the VL
11602 * arbitration tables (i.e., 256 bytes).
11612 * Write the given fabric manager table.
11614 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11617 struct vl_arb_cache *vlc;
11620 case FM_TBL_VL_HIGH_ARB:
11621 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11622 if (vl_arb_match_cache(vlc, t)) {
11623 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11626 vl_arb_set_cache(vlc, t);
11627 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11628 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11629 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11631 case FM_TBL_VL_LOW_ARB:
11632 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11633 if (vl_arb_match_cache(vlc, t)) {
11634 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11637 vl_arb_set_cache(vlc, t);
11638 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11639 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11640 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11642 case FM_TBL_BUFFER_CONTROL:
11643 ret = set_buffer_control(ppd, t);
11645 case FM_TBL_SC2VLNT:
11646 set_sc2vlnt(ppd->dd, t);
11655 * Disable all data VLs.
11657 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11659 static int disable_data_vls(struct hfi1_devdata *dd)
11664 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11670 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11671 * Just re-enables all data VLs (the "fill" part happens
11672 * automatically - the name was chosen for symmetry with
11673 * stop_drain_data_vls()).
11675 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11677 int open_fill_data_vls(struct hfi1_devdata *dd)
11682 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11688 * drain_data_vls() - assumes that disable_data_vls() has been called;
11689 * waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
11690 * engines to drop to 0.
11692 static void drain_data_vls(struct hfi1_devdata *dd)
11696 pause_for_credit_return(dd);
11700 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11702 * Use open_fill_data_vls() to resume using data VLs. This pair is
11703 * meant to be used like this:
11705 * stop_drain_data_vls(dd);
11706 * // do things with per-VL resources
11707 * open_fill_data_vls(dd);
11709 int stop_drain_data_vls(struct hfi1_devdata *dd)
11713 ret = disable_data_vls(dd);
11715 drain_data_vls(dd);
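/*
 * Editorial sketch (not driver code): a hypothetical caller showing the
 * stop_drain_data_vls()/open_fill_data_vls() pairing described above.
 * quiesce_data_vls_example() and its body are illustrative only.
 */
static int __maybe_unused quiesce_data_vls_example(struct hfi1_devdata *dd)
{
	int ret;

	ret = stop_drain_data_vls(dd);	/* disable, then drain */
	if (ret)
		return ret;		/* VLs could not be disabled */
	/* ... manipulate per-VL resources while traffic is quiesced ... */
	open_fill_data_vls(dd);		/* re-enable data VLs */
	return 0;
}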
11721 * Convert a nanosecond time to a cclock count. No matter how slow
11722 * the cclock, a non-zero ns will always have a non-zero result.
11724 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11728 if (dd->icode == ICODE_FPGA_EMULATION)
11729 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11730 else /* simulation pretends to be ASIC */
11731 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11732 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11738 * Convert a cclock count to nanoseconds. No matter how slow
11739 * the cclock, a non-zero cclocks will always have a non-zero result.
11741 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11745 if (dd->icode == ICODE_FPGA_EMULATION)
11746 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11747 else /* simulation pretends to be ASIC */
11748 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11749 if (cclocks && !ns)
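/*
 * Worked example (editorial; the picosecond value is an assumption for
 * illustration only): if ASIC_CCLOCK_PS were 2000 (a 500 MHz cclock),
 * then ns_to_cclock(dd, 824) = (824 * 1000) / 2000 = 412 cclocks and
 * cclock_to_ns(dd, 412) = (412 * 2000) / 1000 = 824 ns. An input of
 * 1 ns would truncate to 0 and be rounded up to 1 by the checks above,
 * preserving the non-zero guarantee.
 */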
11755 * Dynamically adjust the receive interrupt timeout for a context based on
11756 * incoming packet rate.
11758 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11760 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11762 struct hfi1_devdata *dd = rcd->dd;
11763 u32 timeout = rcd->rcvavail_timeout;
11766 * This algorithm doubles or halves the timeout depending on whether
11767 * the number of packets received in this interrupt was less than or
11768 * greater than or equal to the interrupt count.
11770 * The calculations below do not allow a steady state to be achieved.
11771 * Only at the endpoints is it possible to have an unchanging timeout.
11774 if (npkts < rcv_intr_count) {
11776 * Not enough packets arrived before the timeout, adjust
11777 * timeout downward.
11779 if (timeout < 2) /* already at minimum? */
11784 * More than enough packets arrived before the timeout, adjust
11787 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11789 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11792 rcd->rcvavail_timeout = timeout;
11794 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11795 * been verified to be in range
11797 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11799 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11802 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11803 u32 intr_adjust, u32 npkts)
11805 struct hfi1_devdata *dd = rcd->dd;
11807 u32 ctxt = rcd->ctxt;
11810 * Need to write timeout register before updating RcvHdrHead to ensure
11811 * that a new value is used when the HW decides to restart counting.
11814 adjust_rcv_timeout(rcd, npkts);
11816 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11817 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11818 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11821 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11822 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11823 << RCV_HDR_HEAD_HEAD_SHIFT);
11824 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11828 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11832 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11833 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11835 if (rcd->rcvhdrtail_kvaddr)
11836 tail = get_rcvhdrtail(rcd);
11838 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11840 return head == tail;
11844 * Context Control and Receive Array encoding for buffer size:
11845 * 0x0 invalid
11846 * 0x1 4 KB
11847 * 0x2 8 KB
11848 * 0x3 16 KB
11849 * 0x4 32 KB
11850 * 0x5 64 KB
11851 * 0x6 128 KB
11852 * 0x7 256 KB
11853 * 0x8 512 KB (Receive Array only)
11854 * 0x9 1 MB (Receive Array only)
11855 * 0xa 2 MB (Receive Array only)
11857 * 0xB-0xF - reserved (Receive Array only)
11860 * This routine assumes that the value has already been sanity checked.
11862 static u32 encoded_size(u32 size)
11865 case 4 * 1024: return 0x1;
11866 case 8 * 1024: return 0x2;
11867 case 16 * 1024: return 0x3;
11868 case 32 * 1024: return 0x4;
11869 case 64 * 1024: return 0x5;
11870 case 128 * 1024: return 0x6;
11871 case 256 * 1024: return 0x7;
11872 case 512 * 1024: return 0x8;
11873 case 1 * 1024 * 1024: return 0x9;
11874 case 2 * 1024 * 1024: return 0xa;
11876 return 0x1; /* if invalid, go with the minimum size */
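/*
 * Editorial sketch (not driver code): the inverse of encoded_size(),
 * assuming only the 0x1..0xa encodings from the table above. Each step
 * in the encoding doubles the size, starting at 4 KB for 0x1.
 */
static u32 __maybe_unused decoded_size_example(u32 encoded)
{
	if (encoded >= 0x1 && encoded <= 0xa)
		return (4 * 1024) << (encoded - 0x1);
	return 0; /* invalid or reserved encoding */
}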
11879 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11880 struct hfi1_ctxtdata *rcd)
11883 int did_enable = 0;
11891 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11893 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11894 /* if the context is already enabled, don't do the extra steps */
11895 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11896 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11897 /* reset the tail and hdr addresses, and sequence count */
11898 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11900 if (rcd->rcvhdrtail_kvaddr)
11901 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11902 rcd->rcvhdrqtailaddr_dma);
11905 /* reset the cached receive header queue head value */
11909 * Zero the receive header queue so we don't get false
11910 * positives when checking the sequence number. The
11911 * sequence numbers could land exactly on the same spot.
11912 * E.g. a rcd restart before the receive header wrapped.
11914 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
11916 /* starting timeout */
11917 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11919 /* enable the context */
11920 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11922 /* clean the egr buffer size first */
11923 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11924 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11925 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11926 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11928 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11929 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11932 /* zero RcvEgrIndexHead */
11933 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11935 /* set eager count and base index */
11936 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11937 & RCV_EGR_CTRL_EGR_CNT_MASK)
11938 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11939 (((rcd->eager_base >> RCV_SHIFT)
11940 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11941 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11942 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11945 * Set TID (expected) count and base index.
11946 * rcd->expected_count is set to individual RcvArray entries,
11947 * not pairs, and the CSR takes a pair-count in groups of
11948 * four, so divide by 8.
11950 reg = (((rcd->expected_count >> RCV_SHIFT)
11951 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11952 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11953 (((rcd->expected_base >> RCV_SHIFT)
11954 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11955 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11956 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11957 if (ctxt == HFI1_CTRL_CTXT)
11958 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11960 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11961 write_csr(dd, RCV_VL15, 0);
11963 * When a receive context is being disabled, turn on tail
11964 * update with a dummy tail address and then disable
11967 if (dd->rcvhdrtail_dummy_dma) {
11968 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11969 dd->rcvhdrtail_dummy_dma);
11970 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11971 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11974 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11976 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11977 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11978 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11979 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11980 if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11981 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11982 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11983 /* See comment on RcvCtxtCtrl.TailUpd above */
11984 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11985 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11987 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11988 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11989 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11990 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11991 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11993 * In one-packet-per-eager mode, the size comes from
11994 * the RcvArray entry.
11996 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11997 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11999 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
12000 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12001 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
12002 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12003 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
12004 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12005 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
12006 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12007 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
12008 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12009 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12010 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12012 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
12014 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12015 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12017 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12019 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12020 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12021 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12022 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12023 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12024 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12025 ctxt, reg, reg == 0 ? "not" : "still");
12031 * The interrupt timeout and count must be set after
12032 * the context is enabled to take effect.
12034 /* set interrupt timeout */
12035 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12036 (u64)rcd->rcvavail_timeout <<
12037 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12039 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12040 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12041 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12044 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12046 * If the context has been disabled and the Tail Update has
12047 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
12048 * so it doesn't contain an address that is invalid.
12050 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12051 dd->rcvhdrtail_dummy_dma);
12054 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12060 ret = dd->cntrnameslen;
12061 *namep = dd->cntrnames;
12063 const struct cntr_entry *entry;
12066 ret = (dd->ndevcntrs) * sizeof(u64);
12068 /* Get the start of the block of counters */
12069 *cntrp = dd->cntrs;
12072 * Now go and fill in each counter in the block.
12074 for (i = 0; i < DEV_CNTR_LAST; i++) {
12075 entry = &dev_cntrs[i];
12076 hfi1_cdbg(CNTR, "reading %s", entry->name);
12077 if (entry->flags & CNTR_DISABLED) {
12079 hfi1_cdbg(CNTR, "\tDisabled\n");
12081 if (entry->flags & CNTR_VL) {
12082 hfi1_cdbg(CNTR, "\tPer VL\n");
12083 for (j = 0; j < C_VL_COUNT; j++) {
12084 val = entry->rw_cntr(entry,
12090 "\t\tRead 0x%llx for %d\n",
12092 dd->cntrs[entry->offset + j] =
12095 } else if (entry->flags & CNTR_SDMA) {
12097 "\t Per SDMA Engine\n");
12098 for (j = 0; j < chip_sdma_engines(dd);
12101 entry->rw_cntr(entry, dd, j,
12104 "\t\tRead 0x%llx for %d\n",
12106 dd->cntrs[entry->offset + j] =
12110 val = entry->rw_cntr(entry, dd,
12113 dd->cntrs[entry->offset] = val;
12114 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12123 * Used by sysfs to create files for hfi stats to read
12125 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12131 ret = ppd->dd->portcntrnameslen;
12132 *namep = ppd->dd->portcntrnames;
12134 const struct cntr_entry *entry;
12137 ret = ppd->dd->nportcntrs * sizeof(u64);
12138 *cntrp = ppd->cntrs;
12140 for (i = 0; i < PORT_CNTR_LAST; i++) {
12141 entry = &port_cntrs[i];
12142 hfi1_cdbg(CNTR, "reading %s", entry->name);
12143 if (entry->flags & CNTR_DISABLED) {
12145 hfi1_cdbg(CNTR, "\tDisabled\n");
12149 if (entry->flags & CNTR_VL) {
12150 hfi1_cdbg(CNTR, "\tPer VL");
12151 for (j = 0; j < C_VL_COUNT; j++) {
12152 val = entry->rw_cntr(entry, ppd, j,
12157 "\t\tRead 0x%llx for %d",
12159 ppd->cntrs[entry->offset + j] = val;
12162 val = entry->rw_cntr(entry, ppd,
12166 ppd->cntrs[entry->offset] = val;
12167 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12174 static void free_cntrs(struct hfi1_devdata *dd)
12176 struct hfi1_pportdata *ppd;
12179 if (dd->synth_stats_timer.function)
12180 del_timer_sync(&dd->synth_stats_timer);
12181 ppd = (struct hfi1_pportdata *)(dd + 1);
12182 for (i = 0; i < dd->num_pports; i++, ppd++) {
12184 kfree(ppd->scntrs);
12185 free_percpu(ppd->ibport_data.rvp.rc_acks);
12186 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12187 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12189 ppd->scntrs = NULL;
12190 ppd->ibport_data.rvp.rc_acks = NULL;
12191 ppd->ibport_data.rvp.rc_qacks = NULL;
12192 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12194 kfree(dd->portcntrnames);
12195 dd->portcntrnames = NULL;
12200 kfree(dd->cntrnames);
12201 dd->cntrnames = NULL;
12202 if (dd->update_cntr_wq) {
12203 destroy_workqueue(dd->update_cntr_wq);
12204 dd->update_cntr_wq = NULL;
12208 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12209 u64 *psval, void *context, int vl)
12214 if (entry->flags & CNTR_DISABLED) {
12215 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12219 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12221 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12223 /* If it's a synthetic counter, there is more work we need to do */
12224 if (entry->flags & CNTR_SYNTH) {
12225 if (sval == CNTR_MAX) {
12226 /* No need to read already saturated */
12230 if (entry->flags & CNTR_32BIT) {
12231 /* 32bit counters can wrap multiple times */
12232 u64 upper = sval >> 32;
12233 u64 lower = (sval << 32) >> 32;
12235 if (lower > val) { /* hw wrapped */
12236 if (upper == CNTR_32BIT_MAX)
12242 if (val != CNTR_MAX)
12243 val = (upper << 32) | val;
12246 /* If we rolled we are saturated */
12247 if ((val < sval) || (val > CNTR_MAX))
12254 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
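/*
 * Editorial sketch (not driver code) of the 32-bit wrap handling in
 * read_dev_port_cntr() above: extend a wrapping 32-bit hardware value
 * into a monotonic 64-bit count using the saved software value. The
 * CNTR_MAX saturation checks are omitted for brevity.
 */
static u64 __maybe_unused extend_32bit_example(u64 saved, u32 hw_val)
{
	u64 upper = saved >> 32;
	u64 lower = saved & 0xffffffffull;

	if (lower > hw_val)	/* the hardware counter wrapped */
		upper++;
	return (upper << 32) | hw_val;
}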
12259 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12260 struct cntr_entry *entry,
12261 u64 *psval, void *context, int vl, u64 data)
12265 if (entry->flags & CNTR_DISABLED) {
12266 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12270 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12272 if (entry->flags & CNTR_SYNTH) {
12274 if (entry->flags & CNTR_32BIT) {
12275 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12276 (data << 32) >> 32);
12277 val = data; /* return the full 64bit value */
12279 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12283 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12288 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12293 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12295 struct cntr_entry *entry;
12298 entry = &dev_cntrs[index];
12299 sval = dd->scntrs + entry->offset;
12301 if (vl != CNTR_INVALID_VL)
12304 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12307 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12309 struct cntr_entry *entry;
12312 entry = &dev_cntrs[index];
12313 sval = dd->scntrs + entry->offset;
12315 if (vl != CNTR_INVALID_VL)
12318 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12321 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12323 struct cntr_entry *entry;
12326 entry = &port_cntrs[index];
12327 sval = ppd->scntrs + entry->offset;
12329 if (vl != CNTR_INVALID_VL)
12332 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12333 (index <= C_RCV_HDR_OVF_LAST)) {
12334 /* We do not want to bother for disabled contexts */
12338 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12341 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12343 struct cntr_entry *entry;
12346 entry = &port_cntrs[index];
12347 sval = ppd->scntrs + entry->offset;
12349 if (vl != CNTR_INVALID_VL)
12352 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12353 (index <= C_RCV_HDR_OVF_LAST)) {
12354 /* We do not want to bother for disabled contexts */
12358 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12361 static void do_update_synth_timer(struct work_struct *work)
12368 struct hfi1_pportdata *ppd;
12369 struct cntr_entry *entry;
12370 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12374 * Rather than keep beating on the CSRs, pick a minimal set that we can
12375 * check to watch for a potential rollover. We can do this by looking at
12376 * the number of flits sent/received. If the total flit count exceeds
12377 * 32 bits, then we have to iterate all the counters and update.
12379 entry = &dev_cntrs[C_DC_RCV_FLITS];
12380 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12382 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12383 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12387 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12388 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12390 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12392 * May not be strictly necessary to update but it won't hurt and
12393 * simplifies the logic here.
12396 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12399 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12401 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12402 total_flits, (u64)CNTR_32BIT_MAX);
12403 if (total_flits >= CNTR_32BIT_MAX) {
12404 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12411 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12412 for (i = 0; i < DEV_CNTR_LAST; i++) {
12413 entry = &dev_cntrs[i];
12414 if (entry->flags & CNTR_VL) {
12415 for (vl = 0; vl < C_VL_COUNT; vl++)
12416 read_dev_cntr(dd, i, vl);
12418 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12421 ppd = (struct hfi1_pportdata *)(dd + 1);
12422 for (i = 0; i < dd->num_pports; i++, ppd++) {
12423 for (j = 0; j < PORT_CNTR_LAST; j++) {
12424 entry = &port_cntrs[j];
12425 if (entry->flags & CNTR_VL) {
12426 for (vl = 0; vl < C_VL_COUNT; vl++)
12427 read_port_cntr(ppd, j, vl);
12429 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12435 * We want the value in the register. The goal is to keep track
12436 * of the number of "ticks" not the counter value. In other
12437 * words if the register rolls we want to notice it and go ahead
12438 * and force an update.
12440 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12441 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12444 entry = &dev_cntrs[C_DC_RCV_FLITS];
12445 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12448 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12449 dd->unit, dd->last_tx, dd->last_rx);
12452 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12456 static void update_synth_timer(struct timer_list *t)
12458 struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12460 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12461 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
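/*
 * Editorial note on the pattern above: update_synth_timer() runs in
 * timer (softirq) context, so the potentially long counter sweep in
 * do_update_synth_timer() is deferred to the update_cntr_wq workqueue;
 * the timer callback only queues the work and re-arms itself.
 */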
12464 #define C_MAX_NAME 16 /* 15 chars + one for \0 */
12465 static int init_cntrs(struct hfi1_devdata *dd)
12467 int i, rcv_ctxts, j;
12470 char name[C_MAX_NAME];
12471 struct hfi1_pportdata *ppd;
12472 const char *bit_type_32 = ",32";
12473 const int bit_type_32_sz = strlen(bit_type_32);
12474 u32 sdma_engines = chip_sdma_engines(dd);
12476 /* set up the stats timer; the add_timer is done at the end */
12477 timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12479 /***********************/
12480 /* per device counters */
12481 /***********************/
12483 /* size names and determine how many we have */
12487 for (i = 0; i < DEV_CNTR_LAST; i++) {
12488 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12489 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12493 if (dev_cntrs[i].flags & CNTR_VL) {
12494 dev_cntrs[i].offset = dd->ndevcntrs;
12495 for (j = 0; j < C_VL_COUNT; j++) {
12496 snprintf(name, C_MAX_NAME, "%s%d",
12497 dev_cntrs[i].name, vl_from_idx(j));
12498 sz += strlen(name);
12499 /* Add ",32" for 32-bit counters */
12500 if (dev_cntrs[i].flags & CNTR_32BIT)
12501 sz += bit_type_32_sz;
12505 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12506 dev_cntrs[i].offset = dd->ndevcntrs;
12507 for (j = 0; j < sdma_engines; j++) {
12508 snprintf(name, C_MAX_NAME, "%s%d",
12509 dev_cntrs[i].name, j);
12510 sz += strlen(name);
12511 /* Add ",32" for 32-bit counters */
12512 if (dev_cntrs[i].flags & CNTR_32BIT)
12513 sz += bit_type_32_sz;
12518 /* +1 for newline. */
12519 sz += strlen(dev_cntrs[i].name) + 1;
12520 /* Add ",32" for 32-bit counters */
12521 if (dev_cntrs[i].flags & CNTR_32BIT)
12522 sz += bit_type_32_sz;
12523 dev_cntrs[i].offset = dd->ndevcntrs;
12528 /* allocate space for the counter values */
12529 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12534 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12538 /* allocate space for the counter names */
12539 dd->cntrnameslen = sz;
12540 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12541 if (!dd->cntrnames)
12544 /* fill in the names */
12545 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12546 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12548 } else if (dev_cntrs[i].flags & CNTR_VL) {
12549 for (j = 0; j < C_VL_COUNT; j++) {
12550 snprintf(name, C_MAX_NAME, "%s%d",
12553 memcpy(p, name, strlen(name));
12556 /* Counter is 32 bits */
12557 if (dev_cntrs[i].flags & CNTR_32BIT) {
12558 memcpy(p, bit_type_32, bit_type_32_sz);
12559 p += bit_type_32_sz;
12564 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12565 for (j = 0; j < sdma_engines; j++) {
12566 snprintf(name, C_MAX_NAME, "%s%d",
12567 dev_cntrs[i].name, j);
12568 memcpy(p, name, strlen(name));
12571 /* Counter is 32 bits */
12572 if (dev_cntrs[i].flags & CNTR_32BIT) {
12573 memcpy(p, bit_type_32, bit_type_32_sz);
12574 p += bit_type_32_sz;
12580 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12581 p += strlen(dev_cntrs[i].name);
12583 /* Counter is 32 bits */
12584 if (dev_cntrs[i].flags & CNTR_32BIT) {
12585 memcpy(p, bit_type_32, bit_type_32_sz);
12586 p += bit_type_32_sz;
12593 /*********************/
12594 /* per port counters */
12595 /*********************/
12598 * Go through the counters for the overflows and disable the ones we
12599 * don't need. This varies based on platform so we need to do it
12600 * dynamically here.
12602 rcv_ctxts = dd->num_rcv_contexts;
12603 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12604 i <= C_RCV_HDR_OVF_LAST; i++) {
12605 port_cntrs[i].flags |= CNTR_DISABLED;
12608 /* size port counter names and determine how many we have */
12610 dd->nportcntrs = 0;
12611 for (i = 0; i < PORT_CNTR_LAST; i++) {
12612 if (port_cntrs[i].flags & CNTR_DISABLED) {
12613 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12617 if (port_cntrs[i].flags & CNTR_VL) {
12618 port_cntrs[i].offset = dd->nportcntrs;
12619 for (j = 0; j < C_VL_COUNT; j++) {
12620 snprintf(name, C_MAX_NAME, "%s%d",
12621 port_cntrs[i].name, vl_from_idx(j));
12622 sz += strlen(name);
12623 /* Add ",32" for 32-bit counters */
12624 if (port_cntrs[i].flags & CNTR_32BIT)
12625 sz += bit_type_32_sz;
12630 /* +1 for newline */
12631 sz += strlen(port_cntrs[i].name) + 1;
12632 /* Add ",32" for 32-bit counters */
12633 if (port_cntrs[i].flags & CNTR_32BIT)
12634 sz += bit_type_32_sz;
12635 port_cntrs[i].offset = dd->nportcntrs;
12640 /* allocate space for the counter names */
12641 dd->portcntrnameslen = sz;
12642 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12643 if (!dd->portcntrnames)
12646 /* fill in port cntr names */
12647 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12648 if (port_cntrs[i].flags & CNTR_DISABLED)
12651 if (port_cntrs[i].flags & CNTR_VL) {
12652 for (j = 0; j < C_VL_COUNT; j++) {
12653 snprintf(name, C_MAX_NAME, "%s%d",
12654 port_cntrs[i].name, vl_from_idx(j));
12655 memcpy(p, name, strlen(name));
12658 /* Counter is 32 bits */
12659 if (port_cntrs[i].flags & CNTR_32BIT) {
12660 memcpy(p, bit_type_32, bit_type_32_sz);
12661 p += bit_type_32_sz;
12667 memcpy(p, port_cntrs[i].name,
12668 strlen(port_cntrs[i].name));
12669 p += strlen(port_cntrs[i].name);
12671 /* Counter is 32 bits */
12672 if (port_cntrs[i].flags & CNTR_32BIT) {
12673 memcpy(p, bit_type_32, bit_type_32_sz);
12674 p += bit_type_32_sz;
12681 /* allocate per port storage for counter values */
12682 ppd = (struct hfi1_pportdata *)(dd + 1);
12683 for (i = 0; i < dd->num_pports; i++, ppd++) {
12684 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12688 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12693 /* CPU counters need to be allocated and zeroed */
12694 if (init_cpu_counters(dd))
12697 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12698 WQ_MEM_RECLAIM, dd->unit);
12699 if (!dd->update_cntr_wq)
12702 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12704 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12711 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12713 switch (chip_lstate) {
12716 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12720 return IB_PORT_DOWN;
12722 return IB_PORT_INIT;
12724 return IB_PORT_ARMED;
12725 case LSTATE_ACTIVE:
12726 return IB_PORT_ACTIVE;
12730 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12732 /* look at the HFI meta-states only */
12733 switch (chip_pstate & 0xf0) {
12735 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12739 return IB_PORTPHYSSTATE_DISABLED;
12741 return OPA_PORTPHYSSTATE_OFFLINE;
12743 return IB_PORTPHYSSTATE_POLLING;
12744 case PLS_CONFIGPHY:
12745 return IB_PORTPHYSSTATE_TRAINING;
12747 return IB_PORTPHYSSTATE_LINKUP;
12749 return IB_PORTPHYSSTATE_PHY_TEST;
12753 /* return the OPA port logical state name */
12754 const char *opa_lstate_name(u32 lstate)
12756 static const char * const port_logical_names[] = {
12762 "PORT_ACTIVE_DEFER",
12764 if (lstate < ARRAY_SIZE(port_logical_names))
12765 return port_logical_names[lstate];
12769 /* return the OPA port physical state name */
12770 const char *opa_pstate_name(u32 pstate)
12772 static const char * const port_physical_names[] = {
12779 "PHYS_LINK_ERR_RECOVER",
12786 if (pstate < ARRAY_SIZE(port_physical_names))
12787 return port_physical_names[pstate];
12792 * update_statusp - Update userspace status flag
12793 * @ppd: Port data structure
12794 * @state: port state information
12796 * Actual port status is determined by the host_link_state value
12799 * host_link_state MUST be updated before updating the user space
12802 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12805 * Set port status flags in the page mapped into userspace
12806 * memory. Do it here to ensure a reliable state - this is
12807 * the only function called by all state handling code.
12808 * Always set the flags because the cache value
12809 * might have been changed explicitly outside of this
12812 if (ppd->statusp) {
12816 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12817 HFI1_STATUS_IB_READY);
12819 case IB_PORT_ARMED:
12820 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12822 case IB_PORT_ACTIVE:
12823 *ppd->statusp |= HFI1_STATUS_IB_READY;
12827 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12828 opa_lstate_name(state), state);
12832 * wait_logical_linkstate - wait for an IB link state change to occur
12833 * @ppd: port device
12834 * @state: the state to wait for
12835 * @msecs: the number of milliseconds to wait
12837 * Wait up to msecs milliseconds for IB link state change to occur.
12838 * For now, take the easy polling route.
12839 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12841 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12844 unsigned long timeout;
12847 timeout = jiffies + msecs_to_jiffies(msecs);
12849 new_state = chip_to_opa_lstate(ppd->dd,
12850 read_logical_state(ppd->dd));
12851 if (new_state == state)
12853 if (time_after(jiffies, timeout)) {
12854 dd_dev_err(ppd->dd,
12855 "timeout waiting for link state 0x%x\n",
12865 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12867 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12869 dd_dev_info(ppd->dd,
12870 "physical state changed to %s (0x%x), phy 0x%x\n",
12871 opa_pstate_name(ib_pstate), ib_pstate, state);
12875 * Read the physical hardware link state and check if it matches the
12876 * host driver's anticipated state.
12878 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12880 u32 read_state = read_physical_state(ppd->dd);
12882 if (read_state == state) {
12883 log_state_transition(ppd, state);
12885 dd_dev_err(ppd->dd,
12886 "anticipated phy link state 0x%x, read 0x%x\n",
12887 state, read_state);
12892 * wait_physical_linkstate - wait for a physical link state change to occur
12893 * @ppd: port device
12894 * @state: the state to wait for
12895 * @msecs: the number of milliseconds to wait
12897 * Wait up to msecs milliseconds for physical link state change to occur.
12898 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12900 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12904 unsigned long timeout;
12906 timeout = jiffies + msecs_to_jiffies(msecs);
12908 read_state = read_physical_state(ppd->dd);
12909 if (read_state == state)
12911 if (time_after(jiffies, timeout)) {
12912 dd_dev_err(ppd->dd,
12913 "timeout waiting for phy link state 0x%x\n",
12917 usleep_range(1950, 2050); /* sleep 2ms-ish */
12920 log_state_transition(ppd, state);
12925 * wait_phys_link_offline_substates - wait for any offline substate
12926 * @ppd: port device
12927 * @msecs: the number of milliseconds to wait
12929 * Wait up to msecs milliseconds for any offline physical link
12930 * state change to occur.
12931 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
12933 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12937 unsigned long timeout;
12939 timeout = jiffies + msecs_to_jiffies(msecs);
12941 read_state = read_physical_state(ppd->dd);
12942 if ((read_state & 0xF0) == PLS_OFFLINE)
12944 if (time_after(jiffies, timeout)) {
12945 dd_dev_err(ppd->dd,
12946 "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12947 read_state, msecs);
12950 usleep_range(1950, 2050); /* sleep 2ms-ish */
12953 log_state_transition(ppd, read_state);
12958 * wait_phys_link_out_of_offline - wait for any out of offline state
12959 * @ppd: port device
12960 * @msecs: the number of milliseconds to wait
12962 * Wait up to msecs milliseconds for any out of offline physical link
12963 * state change to occur.
12964 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
12966 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
12970 unsigned long timeout;
12972 timeout = jiffies + msecs_to_jiffies(msecs);
12974 read_state = read_physical_state(ppd->dd);
12975 if ((read_state & 0xF0) != PLS_OFFLINE)
12977 if (time_after(jiffies, timeout)) {
12978 dd_dev_err(ppd->dd,
12979 "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
12980 read_state, msecs);
12983 usleep_range(1950, 2050); /* sleep 2ms-ish */
12986 log_state_transition(ppd, read_state);
12990 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12991 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12993 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12994 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12996 void hfi1_init_ctxt(struct send_context *sc)
12999 struct hfi1_devdata *dd = sc->dd;
13001 u8 set = (sc->type == SC_USER ?
13002 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13003 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
13004 reg = read_kctxt_csr(dd, sc->hw_context,
13005 SEND_CTXT_CHECK_ENABLE);
13007 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13009 SET_STATIC_RATE_CONTROL_SMASK(reg);
13010 write_kctxt_csr(dd, sc->hw_context,
13011 SEND_CTXT_CHECK_ENABLE, reg);
13015 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13020 if (dd->icode != ICODE_RTL_SILICON) {
13021 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13022 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13026 reg = read_csr(dd, ASIC_STS_THERM);
13027 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13028 ASIC_STS_THERM_CURR_TEMP_MASK);
13029 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13030 ASIC_STS_THERM_LO_TEMP_MASK);
13031 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13032 ASIC_STS_THERM_HI_TEMP_MASK);
13033 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13034 ASIC_STS_THERM_CRIT_TEMP_MASK);
13035 /* triggers is a 3-bit value - 1 bit per trigger. */
13036 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13042 * get_int_mask - get the 64-bit interrupt mask
13043 * @dd: the devdata
13044 * @i: the CSR (relative to CCE_INT_MASK)
13046 * Returns the mask with the urgent interrupt mask
13047 * bit clear for kernel receive contexts.
13049 static u64 get_int_mask(struct hfi1_devdata *dd, u32 i)
13051 u64 mask = U64_MAX; /* default to no change */
13053 if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) {
13054 int j = (i - (IS_RCVURGENT_START / 64)) * 64;
13055 int k = !j ? IS_RCVURGENT_START % 64 : 0;
13058 j -= IS_RCVURGENT_START % 64;
13059 /* j = 0..dd->first_dyn_alloc_ctxt - 1, k = 0..63 */
13060 for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++)
13061 /* convert to bit in mask and clear */
13062 mask &= ~BIT_ULL(k);
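/*
 * Worked example (editorial; values are hypothetical): if
 * IS_RCVURGENT_START were 192 and dd->first_dyn_alloc_ctxt were 3,
 * then for CSR index i = 192 / 64 = 3 this returns U64_MAX with bits
 * 0..2 cleared - the urgent interrupt is masked for kernel contexts
 * 0..2 while every other source in that CSR stays enabled.
 */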
13067 /* ========================================================================= */
13070 * Enable/disable chip from delivering interrupts.
13072 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
13077 * In HFI, the mask needs to be 1 to allow interrupts.
13080 /* enable all interrupts but urgent on kernel contexts */
13081 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13082 u64 mask = get_int_mask(dd, i);
13084 write_csr(dd, CCE_INT_MASK + (8 * i), mask);
13089 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13090 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13095 * Clear all interrupt sources on the chip.
13097 static void clear_all_interrupts(struct hfi1_devdata *dd)
13101 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13102 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13104 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13105 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13106 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13107 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13108 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13109 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13110 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13111 for (i = 0; i < chip_send_contexts(dd); i++)
13112 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13113 for (i = 0; i < chip_sdma_engines(dd); i++)
13114 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13116 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13117 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13118 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13122 * hfi1_clean_up_interrupts() - Free all IRQ resources
13123 * @dd: valid device data structure
13125 * Free the MSI-X and associated PCI resources, if they have been allocated.
13127 void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
13130 struct hfi1_msix_entry *me = dd->msix_entries;
13132 /* remove irqs - must happen before disabling/turning off */
13133 for (i = 0; i < dd->num_msix_entries; i++, me++) {
13134 if (!me->arg) /* => no irq, no affinity */
13136 hfi1_put_irq_affinity(dd, me);
13137 pci_free_irq(dd->pcidev, i, me->arg);
13140 /* clean structures */
13141 kfree(dd->msix_entries);
13142 dd->msix_entries = NULL;
13143 dd->num_msix_entries = 0;
13145 pci_free_irq_vectors(dd->pcidev);
13149 * Remap the interrupt source from the general handler to the given MSI-X
 * interrupt.
13152 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13157 /* clear from the handled mask of the general interrupt */
13160 if (likely(m < CCE_NUM_INT_CSRS)) {
13161 dd->gi_mask[m] &= ~((u64)1 << n);
13163 dd_dev_err(dd, "remap interrupt err\n");
13167 /* direct the chip source to the given MSI-X interrupt */
13170 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13171 reg &= ~((u64)0xff << (8 * n));
13172 reg |= ((u64)msix_intr & 0xff) << (8 * n);
13173 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
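/*
 * Worked example (editorial; the source number is hypothetical): each
 * 64-bit CCE_INT_MAP CSR holds 8 one-byte entries, one per interrupt
 * source. For isrc = 137: m = 137 / 8 = 17 selects the CSR at
 * CCE_INT_MAP + (8 * 17), n = 137 % 8 = 1 selects byte 1, so bits
 * 8..15 of that CSR are rewritten with the MSI-X vector number.
 */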
13176 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
13177 int engine, int msix_intr)
13180 * SDMA engine interrupt sources are grouped by type, rather than by
13181 * engine. Per-engine interrupts are as follows:
13186 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
13188 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
13190 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
13194 static int request_msix_irqs(struct hfi1_devdata *dd)
13196 int first_general, last_general;
13197 int first_sdma, last_sdma;
13198 int first_rx, last_rx;
13201 /* calculate the ranges we are going to use */
13203 last_general = first_general + 1;
13204 first_sdma = last_general;
13205 last_sdma = first_sdma + dd->num_sdma;
13206 first_rx = last_sdma;
13207 last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
13209 /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
13210 dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
13213 * Sanity check - the code expects all SDMA chip source
13214 * interrupts to be in the same CSR, starting at bit 0. Verify
13215 * that this is true by checking the bit location of the start.
13217 BUILD_BUG_ON(IS_SDMA_START % 64);
13219 for (i = 0; i < dd->num_msix_entries; i++) {
13220 struct hfi1_msix_entry *me = &dd->msix_entries[i];
13221 const char *err_info;
13222 irq_handler_t handler;
13223 irq_handler_t thread = NULL;
13226 struct hfi1_ctxtdata *rcd = NULL;
13227 struct sdma_engine *sde = NULL;
13228 char name[MAX_NAME_SIZE];
13230 /* obtain the arguments to pci_request_irq */
13231 if (first_general <= i && i < last_general) {
13232 idx = i - first_general;
13233 handler = general_interrupt;
13235 snprintf(name, sizeof(name),
13236 DRIVER_NAME "_%d", dd->unit);
13237 err_info = "general";
13238 me->type = IRQ_GENERAL;
13239 } else if (first_sdma <= i && i < last_sdma) {
13240 idx = i - first_sdma;
13241 sde = &dd->per_sdma[idx];
13242 handler = sdma_interrupt;
13244 snprintf(name, sizeof(name),
13245 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
13247 remap_sdma_interrupts(dd, idx, i);
13248 me->type = IRQ_SDMA;
13249 } else if (first_rx <= i && i < last_rx) {
13250 idx = i - first_rx;
13251 rcd = hfi1_rcd_get_by_index_safe(dd, idx);
13254 * Set the interrupt register and mask for this
13255 * context's interrupt.
13257 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13258 rcd->imask = ((u64)1) <<
13259 ((IS_RCVAVAIL_START + idx) % 64);
13260 handler = receive_context_interrupt;
13261 thread = receive_context_thread;
13263 snprintf(name, sizeof(name),
13264 DRIVER_NAME "_%d kctxt%d",
13266 err_info = "receive context";
13267 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
13268 me->type = IRQ_RCVCTXT;
13269 rcd->msix_intr = i;
13273 /* not in our expected range - complain, then ignore it */
13277 "Unexpected extra MSI-X interrupt %d\n", i);
13280 /* no argument, no interrupt */
13283 /* make sure the name is terminated */
13284 name[sizeof(name) - 1] = 0;
13285 me->irq = pci_irq_vector(dd->pcidev, i);
13286 ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
13290 "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13291 err_info, me->irq, idx, ret);
13295 * assign arg after pci_request_irq call, so it will be
13300 ret = hfi1_get_irq_affinity(dd, me);
13302 dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
13308 void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13312 for (i = 0; i < dd->vnic.num_ctxt; i++) {
13313 struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13314 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13316 synchronize_irq(me->irq);
13320 void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13322 struct hfi1_devdata *dd = rcd->dd;
13323 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13325 if (!me->arg) /* => no irq, no affinity */
13328 hfi1_put_irq_affinity(dd, me);
13329 pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
13334 void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13336 struct hfi1_devdata *dd = rcd->dd;
13337 struct hfi1_msix_entry *me;
13338 int idx = rcd->ctxt;
13342 rcd->msix_intr = dd->vnic.msix_idx++;
13343 me = &dd->msix_entries[rcd->msix_intr];
13346 * Set the interrupt register and mask for this
13347 * context's interrupt.
13349 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13350 rcd->imask = ((u64)1) <<
13351 ((IS_RCVAVAIL_START + idx) % 64);
13352 me->type = IRQ_RCVCTXT;
13353 me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13354 remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13356 ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
13357 receive_context_interrupt,
13358 receive_context_thread, arg,
13359 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13361 dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13362 me->irq, idx, ret);
13366 * assign arg after pci_request_irq call, so it will be
13371 ret = hfi1_get_irq_affinity(dd, me);
13374 "unable to pin IRQ %d\n", ret);
13375 pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
13380 * Set the general handler to accept all interrupts, remap all
13381 * chip interrupts back to MSI-X 0.
13383 static void reset_interrupts(struct hfi1_devdata *dd)
13387 /* all interrupts handled by the general handler */
13388 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13389 dd->gi_mask[i] = ~(u64)0;
13391 /* all chip interrupts map to MSI-X 0 */
13392 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13393 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13396 static int set_up_interrupts(struct hfi1_devdata *dd)
13403 * 1 general, "slow path" interrupt (includes the SDMA engines
13404 * slow source, SDMACleanupDone)
13405 * N interrupts - one per used SDMA engine
13406 * M interrupts - one per kernel receive context
13407 * V interrupts - one for each VNIC context
13409 total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
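/*
 * Illustrative count (editorial; a hypothetical configuration): with
 * 16 SDMA engines in use, 8 kernel receive queues, and 1 VNIC context,
 * total = 1 + 16 + 8 + 1 = 26 MSI-X vectors are requested.
 */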
13411 /* ask for MSI-X interrupts */
13412 request = request_msix(dd, total);
13417 dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13419 if (!dd->msix_entries) {
13424 dd->num_msix_entries = total;
13425 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13428 /* mask all interrupts */
13429 set_intr_state(dd, 0);
13430 /* clear all pending interrupts */
13431 clear_all_interrupts(dd);
13433 /* reset general handler mask, chip MSI-X mappings */
13434 reset_interrupts(dd);
13436 ret = request_msix_irqs(dd);
13443 hfi1_clean_up_interrupts(dd);
13448 * Set up context values in dd. Sets:
13450 * num_rcv_contexts - number of contexts being used
13451 * n_krcv_queues - number of kernel contexts
13452 * first_dyn_alloc_ctxt - first dynamically allocated context
13453 * in array of contexts
13454 * freectxts - number of free user contexts
13455 * num_send_contexts - number of PIO send contexts being used
13456 * num_vnic_contexts - number of contexts reserved for VNIC
13458 static int set_up_context_variables(struct hfi1_devdata *dd)
13460 unsigned long num_kernel_contexts;
13461 u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13462 int total_contexts;
13466 int user_rmt_reduced;
13468 u32 send_contexts = chip_send_contexts(dd);
13469 u32 rcv_contexts = chip_rcv_contexts(dd);
13472 * Kernel receive contexts:
13473 * - Context 0 - control context (VL15/multicast/error)
13474 * - Context 1 - first kernel context
13475 * - Context 2 - second kernel context
13480 * n_krcvqs is the sum of module parameter kernel receive
13481 * contexts, krcvqs[]. It does not include the control
13482 * context, so add that.
13484 num_kernel_contexts = n_krcvqs + 1;
13486 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13488 * Every kernel receive context needs an ACK send context.
13489 * One send context is allocated for each VL{0-7} and VL15.
13491 if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13493 "Reducing # kernel rcv contexts to: %d, from %lu\n",
13494 send_contexts - num_vls - 1,
13495 num_kernel_contexts);
13496 num_kernel_contexts = send_contexts - num_vls - 1;
13499 /* Accommodate VNIC contexts if possible */
13500 if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
13501 dd_dev_err(dd, "No receive contexts available for VNIC\n");
13502 num_vnic_contexts = 0;
13504 total_contexts = num_kernel_contexts + num_vnic_contexts;
13508 * - default to 1 user context per real (non-HT) CPU core if
13509 * num_user_contexts is negative
13511 if (num_user_contexts < 0)
13512 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13514 n_usr_ctxts = num_user_contexts;
13516 * Adjust the counts given a global max.
13518 if (total_contexts + n_usr_ctxts > rcv_contexts) {
13520 "Reducing # user receive contexts to: %d, from %u\n",
13521 rcv_contexts - total_contexts,
13524 n_usr_ctxts = rcv_contexts - total_contexts;
13528 * The RMT entries are currently allocated as shown below:
13529 * 1. QOS (0 to 128 entries);
13530 * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
13531 * 3. VNIC (num_vnic_contexts).
13532 * It should be noted that PSM FECN oversubscribes num_vnic_contexts
13533 * entries of RMT because both VNIC and PSM could allocate any receive
13534 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
13535 * and PSM FECN must reserve an RMT entry for each possible PSM receive
13538 rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
13539 if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13540 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
13542 "RMT size is reducing the number of user receive contexts from %u to %d\n",
13546 n_usr_ctxts = user_rmt_reduced;
13549 total_contexts += n_usr_ctxts;
13551 /* the first N are kernel contexts, the rest are user/vnic contexts */
13552 dd->num_rcv_contexts = total_contexts;
13553 dd->n_krcv_queues = num_kernel_contexts;
13554 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13555 dd->num_vnic_contexts = num_vnic_contexts;
13556 dd->num_user_contexts = n_usr_ctxts;
13557 dd->freectxts = n_usr_ctxts;
13559 "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13561 (int)dd->num_rcv_contexts,
13562 (int)dd->n_krcv_queues,
13563 dd->num_vnic_contexts,
13564 dd->num_user_contexts);
13567 * Receive array allocation:
13568 * All RcvArray entries are divided into groups of 8. This
13569 * is required by the hardware and will speed up writes to
13570 * consecutive entries by using write-combining of the entire cacheline.
13573 * The number of groups is evenly divided among all contexts;
13574 * any left over groups will be given to the first N user
13577 dd->rcv_entries.group_size = RCV_INCREMENT;
13578 ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13579 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13580 dd->rcv_entries.nctxt_extra = ngroups -
13581 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13582 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13583 dd->rcv_entries.ngroups,
13584 dd->rcv_entries.nctxt_extra);
13585 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13586 MAX_EAGER_ENTRIES * 2) {
13587 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13588 dd->rcv_entries.group_size;
13590 "RcvArray group count too high, change to %u\n",
13591 dd->rcv_entries.ngroups);
13592 dd->rcv_entries.nctxt_extra = 0;
13595 * PIO send contexts
13597 ret = init_sc_pools_and_sizes(dd);
13598 if (ret >= 0) { /* success */
13599 dd->num_send_contexts = ret;
13602 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13604 dd->num_send_contexts,
13605 dd->sc_sizes[SC_KERNEL].count,
13606 dd->sc_sizes[SC_ACK].count,
13607 dd->sc_sizes[SC_USER].count,
13608 dd->sc_sizes[SC_VL15].count);
13609 ret = 0; /* success */
13616 * Set the device/port partition key table. The MAD code
13617 * will ensure that, at least, the partial management
13618 * partition key is present in the table.
13620 static void set_partition_keys(struct hfi1_pportdata *ppd)
13622 struct hfi1_devdata *dd = ppd->dd;
13626 dd_dev_info(dd, "Setting partition keys\n");
13627 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13628 reg |= (ppd->pkeys[i] &
13629 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13631 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13632 /* Each register holds 4 PKey values. */
13633 if ((i % 4) == 3) {
13634 write_csr(dd, RCV_PARTITION_KEY +
13635 ((i - 3) * 2), reg);
13640 /* Always enable HW pkeys check when pkeys table is set */
13641 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
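/*
 * Worked example (editorial; pkey values are hypothetical): each
 * 64-bit RCV_PARTITION_KEY CSR packs four 16-bit pkeys, with pkey
 * index i occupying bits 16 * (i % 4) .. 16 * (i % 4) + 15. With
 * pkeys[0..3] = { 0xffff, 0x8001, 0, 0 }, the register written at
 * i == 3 is 0xffff | ((u64)0x8001 << 16).
 */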
13645 * These CSRs and memories are uninitialized on reset and must be
13646 * written before reading to set the ECC/parity bits.
13648 * NOTE: All user context CSRs that are not mmapped write-only
13649 * (e.g. the TID flows) must be initialized even if the driver never
13652 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13657 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13658 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13660 /* SendCtxtCreditReturnAddr */
13661 for (i = 0; i < chip_send_contexts(dd); i++)
13662 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13664 /* PIO Send buffers */
13665 /* SDMA Send buffers */
13667 * These are not normally read, and (presently) have no method
13668 * to be read, so are not pre-initialized
13672 /* RcvHdrTailAddr */
13673 /* RcvTidFlowTable */
13674 for (i = 0; i < chip_rcv_contexts(dd); i++) {
13675 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13676 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13677 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13678 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13682 for (i = 0; i < chip_rcv_array_count(dd); i++)
13683 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13685 /* RcvQPMapTable */
13686 for (i = 0; i < 32; i++)
13687 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13691 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13693 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13696 unsigned long timeout;
13699 /* is the condition present? */
13700 reg = read_csr(dd, CCE_STATUS);
13701 if ((reg & status_bits) == 0)
13704 /* clear the condition */
13705 write_csr(dd, CCE_CTRL, ctrl_bits);
13707 /* wait for the condition to clear */
13708 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13710 reg = read_csr(dd, CCE_STATUS);
13711 if ((reg & status_bits) == 0)
13713 if (time_after(jiffies, timeout)) {
13715 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13716 status_bits, reg & status_bits);
13723 /* set CCE CSRs to chip reset defaults */
13724 static void reset_cce_csrs(struct hfi1_devdata *dd)
13728 /* CCE_REVISION read-only */
13729 /* CCE_REVISION2 read-only */
13730 /* CCE_CTRL - bits clear automatically */
13731 /* CCE_STATUS read-only, use CceCtrl to clear */
13732 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13733 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13734 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13735 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13736 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13737 /* CCE_ERR_STATUS read-only */
13738 write_csr(dd, CCE_ERR_MASK, 0);
13739 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13740 /* CCE_ERR_FORCE leave alone */
13741 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13742 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13743 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13744 /* CCE_PCIE_CTRL leave alone */
13745 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13746 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13747 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13748 CCE_MSIX_TABLE_UPPER_RESETCSR);
13750 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13751 /* CCE_MSIX_PBA read-only */
13752 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13753 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13755 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13756 write_csr(dd, CCE_INT_MAP, 0);
13757 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13758 /* CCE_INT_STATUS read-only */
13759 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13760 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13761 /* CCE_INT_FORCE leave alone */
13762 /* CCE_INT_BLOCKED read-only */
13764 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13765 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13768 /* set MISC CSRs to chip reset defaults */
13769 static void reset_misc_csrs(struct hfi1_devdata *dd)
13773 for (i = 0; i < 32; i++) {
13774 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13775 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13776 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13779 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13780 * only be written in 128-byte chunks
13782 /* init RSA engine to clear lingering errors */
13783 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13784 write_csr(dd, MISC_CFG_RSA_MU, 0);
13785 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13786 /* MISC_STS_8051_DIGEST read-only */
13787 /* MISC_STS_SBM_DIGEST read-only */
13788 /* MISC_STS_PCIE_DIGEST read-only */
13789 /* MISC_STS_FAB_DIGEST read-only */
13790 /* MISC_ERR_STATUS read-only */
13791 write_csr(dd, MISC_ERR_MASK, 0);
13792 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13793 /* MISC_ERR_FORCE leave alone */
13796 /* set TXE CSRs to chip reset defaults */
13797 static void reset_txe_csrs(struct hfi1_devdata *dd)
13804 write_csr(dd, SEND_CTRL, 0);
13805 __cm_reset(dd, 0); /* reset CM internal state */
13806 /* SEND_CONTEXTS read-only */
13807 /* SEND_DMA_ENGINES read-only */
13808 /* SEND_PIO_MEM_SIZE read-only */
13809 /* SEND_DMA_MEM_SIZE read-only */
13810 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13811 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13812 /* SEND_PIO_ERR_STATUS read-only */
13813 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13814 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13815 /* SEND_PIO_ERR_FORCE leave alone */
13816 /* SEND_DMA_ERR_STATUS read-only */
13817 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13818 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13819 /* SEND_DMA_ERR_FORCE leave alone */
13820 /* SEND_EGRESS_ERR_STATUS read-only */
13821 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13822 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13823 /* SEND_EGRESS_ERR_FORCE leave alone */
13824 write_csr(dd, SEND_BTH_QP, 0);
13825 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13826 write_csr(dd, SEND_SC2VLT0, 0);
13827 write_csr(dd, SEND_SC2VLT1, 0);
13828 write_csr(dd, SEND_SC2VLT2, 0);
13829 write_csr(dd, SEND_SC2VLT3, 0);
13830 write_csr(dd, SEND_LEN_CHECK0, 0);
13831 write_csr(dd, SEND_LEN_CHECK1, 0);
13832 /* SEND_ERR_STATUS read-only */
13833 write_csr(dd, SEND_ERR_MASK, 0);
13834 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13835 /* SEND_ERR_FORCE read-only */
13836 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13837 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13838 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13839 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13840 for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13841 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13842 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13843 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13844 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13845 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13846 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13847 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13848 /* SEND_CM_CREDIT_USED_STATUS read-only */
13849 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13850 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13851 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13852 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13853 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13854 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13855 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13856 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13857 /* SEND_CM_CREDIT_USED_VL read-only */
13858 /* SEND_CM_CREDIT_USED_VL15 read-only */
13859 /* SEND_EGRESS_CTXT_STATUS read-only */
13860 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13861 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13862 /* SEND_EGRESS_ERR_INFO is write-1-to-clear, cleared just above */
13863 /* SEND_EGRESS_ERR_SOURCE read-only */
13866 * TXE Per-Context CSRs
13868 for (i = 0; i < chip_send_contexts(dd); i++) {
13869 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13870 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13871 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13872 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13873 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13874 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13875 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13876 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13877 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13878 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13879 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13880 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13884 * TXE Per-SDMA CSRs
13886 for (i = 0; i < chip_sdma_engines(dd); i++) {
13887 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13888 /* SEND_DMA_STATUS read-only */
13889 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13890 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13891 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13892 /* SEND_DMA_HEAD read-only */
13893 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13894 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13895 /* SEND_DMA_IDLE_CNT read-only */
13896 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13897 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13898 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13899 /* SEND_DMA_ENG_ERR_STATUS read-only */
13900 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13901 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13902 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13903 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13904 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13905 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13906 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13907 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13908 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13909 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13915 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13917 static void init_rbufs(struct hfi1_devdata *dd)
13923 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13928 reg = read_csr(dd, RCV_STATUS);
13929 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13930 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13933 * Give up after 1ms - maximum wait time.
13935 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
13936 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13937 * 136 KB / (66% * 250MB/s) = 844us
13939 if (count++ > 500) {
13941 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13945 udelay(2); /* do not busy-wait the CSR */
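/*
 * Worked example of the wait budget above (a sketch; the byte counts
 * come from the comment, not from hardware): the loop polls every 2us
 * and gives up after 500 passes, i.e. 500 * 2us = 1ms, comfortably
 * above the 844us worst-case drain time computed as
 * 136 KB / (66% * 250 MB/s).
 */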
13948 /* start the init - expect RcvCtrl to be 0 */
13949 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13952 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13953 * period after the write before RcvStatus.RxRbufInitDone is valid.
13954 * The delay in the first run through the loop below is sufficient and
13955 * required before the first read of RcvStatus.RxRbufInitDone.
13957 read_csr(dd, RCV_CTRL);
13959 /* wait for the init to finish */
13962 /* delay is required first time through - see above */
13963 udelay(2); /* do not busy-wait the CSR */
13964 reg = read_csr(dd, RCV_STATUS);
13965 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13968 /* give up after 100us - slowest possible at 33MHz is 73us */
13969 if (count++ > 50) {
13971 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13978 /* set RXE CSRs to chip reset defaults */
13979 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13986 write_csr(dd, RCV_CTRL, 0);
13988 /* RCV_STATUS read-only */
13989 /* RCV_CONTEXTS read-only */
13990 /* RCV_ARRAY_CNT read-only */
13991 /* RCV_BUF_SIZE read-only */
13992 write_csr(dd, RCV_BTH_QP, 0);
13993 write_csr(dd, RCV_MULTICAST, 0);
13994 write_csr(dd, RCV_BYPASS, 0);
13995 write_csr(dd, RCV_VL15, 0);
13996 /* this is a clear-down */
13997 write_csr(dd, RCV_ERR_INFO,
13998 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13999 /* RCV_ERR_STATUS read-only */
14000 write_csr(dd, RCV_ERR_MASK, 0);
14001 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
14002 /* RCV_ERR_FORCE leave alone */
14003 for (i = 0; i < 32; i++)
14004 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
14005 for (i = 0; i < 4; i++)
14006 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
14007 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
14008 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
14009 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
14010 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
14011 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
14012 clear_rsm_rule(dd, i);
14013 for (i = 0; i < 32; i++)
14014 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
14017 * RXE Kernel and User Per-Context CSRs
14019 for (i = 0; i < chip_rcv_contexts(dd); i++) {
14021 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
14022 /* RCV_CTXT_STATUS read-only */
14023 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
14024 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
14025 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
14026 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
14027 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
14028 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
14029 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
14030 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
14031 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
14032 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
14035 /* RCV_HDR_TAIL read-only */
14036 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
14037 /* RCV_EGR_INDEX_TAIL read-only */
14038 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
14039 /* RCV_EGR_OFFSET_TAIL read-only */
14040 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
14041 write_uctxt_csr(dd, i,
14042 RCV_TID_FLOW_TABLE + (8 * j), 0);
14048 * Set sc2vl tables.
14050 * They power on to zeros, so to avoid send context errors
14051 * they need to be set:
14053 * SC 0-7 -> VL 0-7 (respectively)
14058 static void init_sc2vl_tables(struct hfi1_devdata *dd)
14061 /* init per architecture spec, constrained by hardware capability */
14063 /* HFI maps sent packets */
14064 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
14070 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
14076 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
14082 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
14089 /* DC maps received packets */
14090 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
14092 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
14093 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
14094 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
14096 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
14097 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
14099 /* initialize the cached sc2vl values consistently with h/w */
14100 for (i = 0; i < 32; i++) {
14101 if (i < 8 || i == 15)
14102 *((u8 *)(dd->sc2vl) + i) = (u8)i;
14104 *((u8 *)(dd->sc2vl) + i) = 0;
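/*
 * Illustration of the cached table built above: treating dd->sc2vl as
 * 32 bytes indexed by SC, the result is
 *   SC:  0  1  2  3  4  5  6  7   8..14   15   16..31
 *   VL:  0  1  2  3  4  5  6  7   0       15   0
 * which mirrors the SEND_SC2VLT and DCC_CFG_SC_VL_TABLE values written
 * to the chip.
 */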
14109 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
14110 * depend on the chip going through a power-on reset - a driver may be loaded
14111 * and unloaded many times.
14113 * Do not write any CSR values to the chip in this routine - there may be
14114 * a reset following the (possible) FLR performed here.
14117 static int init_chip(struct hfi1_devdata *dd)
14123 * Put the HFI CSRs in a known state.
14124 * Combine this with a DC reset.
14126 * Stop the device from doing anything while we do a
14127 * reset. We know there are no other active users of
14128 * the device since we are now in charge. Turn off
14129 * all outbound and inbound traffic and make sure
14130 * the device does not generate any interrupts.
14133 /* disable send contexts and SDMA engines */
14134 write_csr(dd, SEND_CTRL, 0);
14135 for (i = 0; i < chip_send_contexts(dd); i++)
14136 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
14137 for (i = 0; i < chip_sdma_engines(dd); i++)
14138 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
14139 /* disable port (turn off RXE inbound traffic) and contexts */
14140 write_csr(dd, RCV_CTRL, 0);
14141 for (i = 0; i < chip_rcv_contexts(dd); i++)
14142 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
14143 /* mask all interrupt sources */
14144 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
14145 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
14148 * DC Reset: do a full DC reset before the register clear.
14149 * A recommended length of time to hold is one CSR read,
14150 * so reread the CceDcCtrl. Then, hold the DC in reset
14151 * across the clear.
14153 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
14154 (void)read_csr(dd, CCE_DC_CTRL);
14158 * A FLR will reset the SPC core and part of the PCIe.
14159 * The parts that need to be restored have already been
14160 * saved.
14162 dd_dev_info(dd, "Resetting CSRs with FLR\n");
14164 /* do the FLR, the DC reset will remain */
14165 pcie_flr(dd->pcidev);
14167 /* restore command and BARs */
14168 ret = restore_pci_variables(dd);
14170 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14176 dd_dev_info(dd, "Resetting CSRs with FLR\n");
14177 pcie_flr(dd->pcidev);
14178 ret = restore_pci_variables(dd);
14180 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14186 dd_dev_info(dd, "Resetting CSRs with writes\n");
14187 reset_cce_csrs(dd);
14188 reset_txe_csrs(dd);
14189 reset_rxe_csrs(dd);
14190 reset_misc_csrs(dd);
14192 /* clear the DC reset */
14193 write_csr(dd, CCE_DC_CTRL, 0);
14195 /* Set the LED off */
14199 * Clear the QSFP reset.
14200 * An FLR enforces a 0 on all out pins. The driver does not touch
14201 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, holding
14202 * anything that is plugged in constantly in reset, if it pays
14203 * attention to RESET_N.
14204 * Prime examples of this are optical cables. Set all pins high.
14205 * I2CCLK and I2CDAT will change per direction, and INT_N and
14206 * MODPRS_N are input only and their value is ignored.
14208 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14209 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
14210 init_chip_resources(dd);
14214 static void init_early_variables(struct hfi1_devdata *dd)
14218 /* assign link credit variables */
14220 dd->link_credits = CM_GLOBAL_CREDITS;
14222 dd->link_credits--;
14223 dd->vcu = cu_to_vcu(hfi1_cu);
14224 /* enough room for 8 MAD packets plus header - 17K */
14225 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14226 if (dd->vl15_init > dd->link_credits)
14227 dd->vl15_init = dd->link_credits;
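/*
 * Example of the vl15_init sizing above, assuming (for illustration
 * only) an AU size of 64 bytes: 8 MAD packets of 2048 + 128 bytes each
 * is 17408 bytes, so vl15_init would be 17408 / 64 = 272 AUs, capped at
 * the global credit limit.
 */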
14229 write_uninitialized_csrs_and_memories(dd);
14231 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14232 for (i = 0; i < dd->num_pports; i++) {
14233 struct hfi1_pportdata *ppd = &dd->pport[i];
14235 set_partition_keys(ppd);
14237 init_sc2vl_tables(dd);
14240 static void init_kdeth_qp(struct hfi1_devdata *dd)
14242 /* user changed the KDETH_QP */
14243 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14244 /* out of range or illegal value */
14245 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14246 kdeth_qp = 0;
14247 }
14248 if (kdeth_qp == 0) /* not set, or failed range check */
14249 kdeth_qp = DEFAULT_KDETH_QP;
14251 write_csr(dd, SEND_BTH_QP,
14252 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14253 SEND_BTH_QP_KDETH_QP_SHIFT);
14255 write_csr(dd, RCV_BTH_QP,
14256 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14257 RCV_BTH_QP_KDETH_QP_SHIFT);
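/*
 * For illustration: the prefix written above occupies the upper bits of
 * the BTH QP field, so with a default prefix of 0x80 (the assumed value
 * of DEFAULT_KDETH_QP), QPNs of the form 0x80xxxx are treated as KDETH
 * packets on both the send and receive side. This is a sketch of the
 * intent, not a statement of the exact hardware match width.
 */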
14262 * @dd - device data
14263 * @first_ctxt - first context
14264 * @last_ctxt - last context
14266 * This routine sets the qpn mapping table that
14267 * is indexed by qpn[8:1].
14269 * The routine will round robin the 256 settings
14270 * from first_ctxt to last_ctxt.
14272 * The first/last looks ahead to having specialized
14273 * receive contexts for mgmt and bypass. Normal
14274 * verbs traffic is assumed to be on a range
14275 * of receive contexts.
14277 static void init_qpmap_table(struct hfi1_devdata *dd,
14282 u64 regno = RCV_QP_MAP_TABLE;
14284 u64 ctxt = first_ctxt;
14286 for (i = 0; i < 256; i++) {
14287 reg |= ctxt << (8 * (i % 8));
14289 if (ctxt > last_ctxt)
14292 write_csr(dd, regno, reg);
14298 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14299 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
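/*
 * Worked example of the round robin above: with first_ctxt = 1 and
 * last_ctxt = 3, the 256 map entries are filled 1, 2, 3, 1, 2, 3, ...
 * Entries are packed one byte per context, eight per 64-bit
 * RCV_QP_MAP_TABLE register, so 32 registers hold the whole table.
 */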
14302 struct rsm_map_table {
14303 u64 map[NUM_MAP_REGS];
14307 struct rsm_rule_data {
14323 * Return an initialized RMT map table for users to fill in. OK if it
14324 * returns NULL, indicating no table.
14326 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14328 struct rsm_map_table *rmt;
14329 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
14331 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14333 memset(rmt->map, rxcontext, sizeof(rmt->map));
14341 * Write the final RMT map table to the chip and free the table. OK if
14344 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14345 struct rsm_map_table *rmt)
14350 /* write table to chip */
14351 for (i = 0; i < NUM_MAP_REGS; i++)
14352 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14355 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14360 * Add a receive side mapping rule.
14362 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14363 struct rsm_rule_data *rrd)
14365 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14366 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14367 1ull << rule_index | /* enable bit */
14368 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14369 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14370 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14371 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14372 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14373 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14374 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14375 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14376 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14377 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14378 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14379 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14380 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14384 * Clear a receive side mapping rule.
14386 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14388 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14389 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14390 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14393 /* return the number of RSM map table entries that will be used for QOS */
14394 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14401 /* is QOS active at all? */
14402 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14407 /* determine bits for qpn */
14408 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14409 if (krcvqs[i] > max_by_vl)
14410 max_by_vl = krcvqs[i];
14411 if (max_by_vl > 32)
14413 m = ilog2(__roundup_pow_of_two(max_by_vl));
14415 /* determine bits for vl */
14416 n = ilog2(__roundup_pow_of_two(num_vls));
14418 /* reject if too much is used */
14427 return 1 << (m + n);
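/*
 * Worked example (values are illustrative): num_vls = 8 and a largest
 * krcvqs[] entry of 4 give m = ilog2(4) = 2 and n = ilog2(8) = 3, so
 * the QOS rule consumes 1 << (2 + 3) = 32 RSM map table entries.
 */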
14438 * init_qos - init RX qos
14439 * @dd - device data
14440 * @rmt - RSM map table
14442 * This routine initializes Rule 0 and the RSM map table to implement
14443 * quality of service (qos).
14445 * If all of the limit tests succeed, qos is applied based on the array
14446 * interpretation of krcvqs where entry 0 is VL0.
14448 * The number of vl bits (n) and the number of qpn bits (m) are computed to
14449 * feed both the RSM map table and the single rule.
14451 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14453 struct rsm_rule_data rrd;
14454 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14455 unsigned int rmt_entries;
14460 rmt_entries = qos_rmt_entries(dd, &m, &n);
14461 if (rmt_entries == 0)
14463 qpns_per_vl = 1 << m;
14465 /* enough room in the map table? */
14466 rmt_entries = 1 << (m + n);
14467 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14470 /* add qos entries to the RSM map table */
14471 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14474 for (qpn = 0, tctxt = ctxt;
14475 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14476 unsigned idx, regoff, regidx;
14478 /* generate the index the hardware will produce */
14479 idx = rmt->used + ((qpn << n) ^ i);
14480 regoff = (idx % 8) * 8;
14481 regidx = idx / 8;
14482 /* replace default with context number */
14483 reg = rmt->map[regidx];
14484 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14486 reg |= (u64)(tctxt++) << regoff;
14487 rmt->map[regidx] = reg;
14488 if (tctxt == ctxt + krcvqs[i])
14494 rrd.offset = rmt->used;
14496 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14497 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14498 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14499 rrd.index1_width = n;
14500 rrd.index2_off = QPN_SELECT_OFFSET;
14501 rrd.index2_width = m + n;
14502 rrd.mask1 = LRH_BTH_MASK;
14503 rrd.value1 = LRH_BTH_VALUE;
14504 rrd.mask2 = LRH_SC_MASK;
14505 rrd.value2 = LRH_SC_VALUE;
14508 add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14510 /* mark RSM map entries as used */
14511 rmt->used += rmt_entries;
14512 /* map everything else to the mcast/err/vl15 context */
14513 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14514 dd->qos_shift = n + 1;
14518 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14521 static void init_user_fecn_handling(struct hfi1_devdata *dd,
14522 struct rsm_map_table *rmt)
14524 struct rsm_rule_data rrd;
14526 int i, idx, regoff, regidx;
14530 /* there needs to be enough room in the map table */
14531 total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
14532 if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
14533 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14538 * RSM will extract the destination context as an index into the
14539 * map table. The destination contexts are a sequential block
14540 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
14541 * Map entries are accessed as offset + extracted value. Adjust
14542 * the added offset so this sequence can be placed anywhere in
14543 * the table - as long as the entries themselves do not wrap.
14544 * There are only enough bits in offset for the table size, so
14545 * start with that to allow for a "negative" offset.
14547 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14548 (int)dd->first_dyn_alloc_ctxt);
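/*
 * Worked example of the "negative" offset trick (numbers are
 * illustrative): with 256 map entries, rmt->used = 40 and
 * first_dyn_alloc_ctxt = 48, offset = (u8)(256 + 40 - 48) = 248. The
 * hardware then computes 248 + 48 = 296, which wraps modulo 256 to map
 * entry 40 - exactly where this block's entries begin.
 */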
14550 for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
14551 i < dd->num_rcv_contexts; i++, idx++) {
14552 /* replace with identity mapping */
14553 regoff = (idx % 8) * 8;
14554 regidx = idx / 8;
14555 reg = rmt->map[regidx];
14556 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14557 reg |= (u64)i << regoff;
14558 rmt->map[regidx] = reg;
14562 * For RSM intercept of Expected FECN packets:
14563 * o packet type 0 - expected
14564 * o match on F (bit 95), using select/match 1, and
14565 * o match on SH (bit 133), using select/match 2.
14567 * Use index 1 to extract the 8-bit receive context from DestQP
14568 * (start at bit 64). Use that as the RSM map table index.
14570 rrd.offset = offset;
14572 rrd.field1_off = 95;
14573 rrd.field2_off = 133;
14574 rrd.index1_off = 64;
14575 rrd.index1_width = 8;
14576 rrd.index2_off = 0;
14577 rrd.index2_width = 0;
14584 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14586 rmt->used += total_cnt;
14589 /* Initialize RSM for VNIC */
14590 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14596 struct rsm_rule_data rrd;
14598 if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14599 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14600 dd->vnic.rmt_start);
14604 dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14605 dd->vnic.rmt_start,
14606 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14608 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14609 regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14610 reg = read_csr(dd, regoff);
14611 for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14612 /* Update map register with vnic context */
14613 j = (dd->vnic.rmt_start + i) % 8;
14614 reg &= ~(0xffllu << (j * 8));
14615 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14616 /* Wrap up vnic ctx index */
14617 ctx_id %= dd->vnic.num_ctxt;
14618 /* Write back map register */
14619 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14620 dev_dbg(&(dd)->pcidev->dev,
14621 "Vnic rsm map reg[%d] =0x%llx\n",
14622 regoff - RCV_RSM_MAP_TABLE, reg);
14624 write_csr(dd, regoff, reg);
14626 if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14627 reg = read_csr(dd, regoff);
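/*
 * Example of the fill above (illustrative numbers): with
 * rmt_start = 40, two vnic contexts A and B, and eight map entries,
 * entries 40..47 receive A, B, A, B, ... - the ctx_id modulo wrap
 * distributes entries round robin across the available vnic contexts.
 */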
14631 /* Add rule for vnic */
14632 rrd.offset = dd->vnic.rmt_start;
14634 /* Match 16B packets */
14635 rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14636 rrd.mask1 = L2_TYPE_MASK;
14637 rrd.value1 = L2_16B_VALUE;
14638 /* Match ETH L4 packets */
14639 rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14640 rrd.mask2 = L4_16B_TYPE_MASK;
14641 rrd.value2 = L4_16B_ETH_VALUE;
14642 /* Calc context from veswid and entropy */
14643 rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14644 rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14645 rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14646 rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14647 add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14649 /* Enable RSM if not already enabled */
14650 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14653 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14655 clear_rsm_rule(dd, RSM_INS_VNIC);
14657 /* Disable RSM if used only by vnic */
14658 if (dd->vnic.rmt_start == 0)
14659 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14662 static int init_rxe(struct hfi1_devdata *dd)
14664 struct rsm_map_table *rmt;
14667 /* enable all receive errors */
14668 write_csr(dd, RCV_ERR_MASK, ~0ull);
14670 rmt = alloc_rsm_map_table(dd);
14674 /* set up QOS, including the QPN map table */
14676 init_user_fecn_handling(dd, rmt);
14677 complete_rsm_map_table(dd, rmt);
14678 /* record number of used rsm map entries for vnic */
14679 dd->vnic.rmt_start = rmt->used;
14683 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14684 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14685 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14686 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14687 * Max_Payload_Size set to its minimum of 128.
14689 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14690 * (64 bytes). Max_Payload_Size is possibly modified upward in
14691 * tune_pcie_caps() which is called after this routine.
14694 /* Have 16 bytes (4DW) of bypass header available in header queue */
14695 val = read_csr(dd, RCV_BYPASS);
14696 val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14697 val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14698 RCV_BYPASS_HDR_SIZE_SHIFT);
14699 write_csr(dd, RCV_BYPASS, val);
14703 static void init_other(struct hfi1_devdata *dd)
14705 /* enable all CCE errors */
14706 write_csr(dd, CCE_ERR_MASK, ~0ull);
14707 /* enable *some* Misc errors */
14708 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14709 /* enable all DC errors, except LCB */
14710 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14711 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14715 * Fill out the given AU table using the given CU. A CU is defined in terms
14716 * of AUs. The table is an encoding: given the index, how many AUs does that
14719 * NOTE: Assumes that the register layout is the same for the
14720 * local and remote tables.
14722 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14723 u32 csr0to3, u32 csr4to7)
14725 write_csr(dd, csr0to3,
14726 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14727 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14729 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14731 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14732 write_csr(dd, csr4to7,
14734 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14736 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14738 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14740 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
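/*
 * Sketch of the encoding written above, assuming the elided multipliers
 * continue the doubling pattern of entries 0 and 1: table index i maps
 * to 0, 1, 2*cu, 4*cu, ... AUs, so a peer advertising a larger index is
 * granted geometrically larger allocation units. This is an assumption
 * for illustration, not taken from the CSR spec.
 */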
14743 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14745 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14746 SEND_CM_LOCAL_AU_TABLE4_TO7);
14749 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14751 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14752 SEND_CM_REMOTE_AU_TABLE4_TO7);
14755 static void init_txe(struct hfi1_devdata *dd)
14759 /* enable all PIO, SDMA, general, and Egress errors */
14760 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14761 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14762 write_csr(dd, SEND_ERR_MASK, ~0ull);
14763 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14765 /* enable all per-context and per-SDMA engine errors */
14766 for (i = 0; i < chip_send_contexts(dd); i++)
14767 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14768 for (i = 0; i < chip_sdma_engines(dd); i++)
14769 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14771 /* set the local CU to AU mapping */
14772 assign_local_cm_au_table(dd, dd->vcu);
14775 * Set reasonable default for Credit Return Timer
14776 * Don't set on Simulator - causes it to choke.
14778 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14779 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14782 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14788 if (!rcd || !rcd->sc)
14791 hw_ctxt = rcd->sc->hw_context;
14792 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14793 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14794 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14795 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14796 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14797 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14798 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14800 * Enable send-side J_KEY integrity check, unless this is A0 h/w
14803 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14804 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14805 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14808 /* Enable J_KEY check on receive context. */
14809 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14810 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14811 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14812 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14817 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14822 if (!rcd || !rcd->sc)
14825 hw_ctxt = rcd->sc->hw_context;
14826 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14828 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14829 * This check would not have been enabled for A0 h/w, see
14833 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14834 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14835 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14837 /* Turn off the J_KEY on the receive side */
14838 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14843 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14849 if (!rcd || !rcd->sc)
14852 hw_ctxt = rcd->sc->hw_context;
14853 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14854 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14855 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14856 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14857 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14858 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14859 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14864 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14869 if (!ctxt || !ctxt->sc)
14872 hw_ctxt = ctxt->sc->hw_context;
14873 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14874 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14875 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14876 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14882 * Start cleaning up the chip. Our clean up happens in multiple
14883 * stages and this is just the first.
14885 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14890 finish_chip_resources(dd);
14893 #define HFI_BASE_GUID(dev) \
14894 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
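/*
 * Example of the peer match below: two HFIs on one ASIC share a base
 * GUID and differ only in the bit at GUID_HFI_INDEX_SHIFT, so masking
 * that bit off makes the two devices compare equal while dd->unit still
 * tells them apart.
 */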
14897 * Information can be shared between the two HFIs on the same ASIC
14898 * in the same OS. This function finds the peer device and sets
14899 * up a shared structure.
14901 static int init_asic_data(struct hfi1_devdata *dd)
14903 unsigned long flags;
14904 struct hfi1_devdata *tmp, *peer = NULL;
14905 struct hfi1_asic_data *asic_data;
14908 /* pre-allocate the asic structure in case we are the first device */
14909 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14913 spin_lock_irqsave(&hfi1_devs_lock, flags);
14914 /* Find our peer device */
14915 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14916 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14917 dd->unit != tmp->unit) {
14924 /* use already allocated structure */
14925 dd->asic_data = peer->asic_data;
14928 dd->asic_data = asic_data;
14929 mutex_init(&dd->asic_data->asic_resource_mutex);
14931 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14932 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14934 /* first one through - set up i2c devices */
14936 ret = set_up_i2c(dd, dd->asic_data);
14942 * Set dd->boardname. Use a generic name if a name is not returned from
14943 * EFI variable space.
14945 * Return 0 on success, -ENOMEM if space could not be allocated.
14947 static int obtain_boardname(struct hfi1_devdata *dd)
14949 /* generic board description */
14950 const char generic[] =
14951 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14952 unsigned long size;
14955 ret = read_hfi1_efi_var(dd, "description", &size,
14956 (void **)&dd->boardname);
14958 dd_dev_info(dd, "Board description not found\n");
14959 /* use generic description */
14960 dd->boardname = kstrdup(generic, GFP_KERNEL);
14961 if (!dd->boardname)
14968 * Check the interrupt registers to make sure that they are mapped correctly.
14969 * It is intended to help the user identify any mismapping by the VMM when
14970 * the driver is running in a VM. This function should only be called before
14971 * interrupts are set up properly.
14973 * Return 0 on success, -EINVAL on failure.
14975 static int check_int_registers(struct hfi1_devdata *dd)
14978 u64 all_bits = ~(u64)0;
14981 /* Clear CceIntMask[0] to avoid raising any interrupts */
14982 mask = read_csr(dd, CCE_INT_MASK);
14983 write_csr(dd, CCE_INT_MASK, 0ull);
14984 reg = read_csr(dd, CCE_INT_MASK);
14988 /* Clear all interrupt status bits */
14989 write_csr(dd, CCE_INT_CLEAR, all_bits);
14990 reg = read_csr(dd, CCE_INT_STATUS);
14994 /* Set all interrupt status bits */
14995 write_csr(dd, CCE_INT_FORCE, all_bits);
14996 reg = read_csr(dd, CCE_INT_STATUS);
14997 if (reg != all_bits)
15000 /* Restore the interrupt mask */
15001 write_csr(dd, CCE_INT_CLEAR, all_bits);
15002 write_csr(dd, CCE_INT_MASK, mask);
15006 write_csr(dd, CCE_INT_MASK, mask);
15007 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
15012 * Allocate and initialize the device structure for the hfi.
15013 * @pdev: the pci_dev for hfi1_ib device
15014 * @ent: pci_device_id struct for this dev
15016 * Also allocates, initializes, and returns the devdata struct for this
15019 * This is global, and is called directly at init to set up the
15020 * chip-specific function pointers for later use.
15022 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
15023 const struct pci_device_id *ent)
15025 struct hfi1_devdata *dd;
15026 struct hfi1_pportdata *ppd;
15029 static const char * const inames[] = { /* implementation names */
15031 "RTL VCS simulation",
15032 "RTL FPGA emulation",
15033 "Functional simulator"
15035 struct pci_dev *parent = pdev->bus->self;
15038 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
15039 sizeof(struct hfi1_pportdata));
15042 sdma_engines = chip_sdma_engines(dd);
15044 for (i = 0; i < dd->num_pports; i++, ppd++) {
15046 /* init common fields */
15047 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
15048 /* DC supports 4 link widths */
15049 ppd->link_width_supported =
15050 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
15051 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
15052 ppd->link_width_downgrade_supported =
15053 ppd->link_width_supported;
15054 /* start out enabling only 4X */
15055 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
15056 ppd->link_width_downgrade_enabled =
15057 ppd->link_width_downgrade_supported;
15058 /* link width active is 0 when link is down */
15059 /* link width downgrade active is 0 when link is down */
15061 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
15062 num_vls > HFI1_MAX_VLS_SUPPORTED) {
15063 dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
15064 num_vls, HFI1_MAX_VLS_SUPPORTED);
15065 num_vls = HFI1_MAX_VLS_SUPPORTED;
15067 ppd->vls_supported = num_vls;
15068 ppd->vls_operational = ppd->vls_supported;
15069 /* Set the default MTU. */
15070 for (vl = 0; vl < num_vls; vl++)
15071 dd->vld[vl].mtu = hfi1_max_mtu;
15072 dd->vld[15].mtu = MAX_MAD_PACKET;
15074 * Set the initial values to reasonable default, will be set
15075 * for real when link is up.
15077 ppd->overrun_threshold = 0x4;
15078 ppd->phy_error_threshold = 0xf;
15079 ppd->port_crc_mode_enabled = link_crc_mask;
15080 /* initialize supported LTP CRC mode */
15081 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
15082 /* initialize enabled LTP CRC mode */
15083 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
15084 /* start in offline */
15085 ppd->host_link_state = HLS_DN_OFFLINE;
15086 init_vl_arb_caches(ppd);
15090 * Do remaining PCIe setup and save PCIe values in dd.
15091 * Any error printing is already done by the init code.
15092 * On return, we have the chip mapped.
15094 ret = hfi1_pcie_ddinit(dd, pdev);
15098 /* Save PCI space registers to rewrite after device reset */
15099 ret = save_pci_variables(dd);
15103 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
15104 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
15105 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
15106 & CCE_REVISION_CHIP_REV_MINOR_MASK;
15109 * Check interrupt registers mapping if the driver has no access to
15110 * the upstream component. In this case, it is likely that the driver
15111 * is running in a VM.
15114 ret = check_int_registers(dd);
15120 * obtain the hardware ID - NOT related to unit, which is a
15121 * software enumeration
15123 reg = read_csr(dd, CCE_REVISION2);
15124 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
15125 & CCE_REVISION2_HFI_ID_MASK;
15126 /* the variable size will remove unwanted bits */
15127 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
15128 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
15129 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
15130 dd->icode < ARRAY_SIZE(inames) ?
15131 inames[dd->icode] : "unknown", (int)dd->irev);
15133 /* speeds the hardware can support */
15134 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
15135 /* speeds allowed to run at */
15136 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
15137 /* give a reasonable active value, will be set on link up */
15138 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
15140 /* fix up link widths for emulation _p */
15142 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
15143 ppd->link_width_supported =
15144 ppd->link_width_enabled =
15145 ppd->link_width_downgrade_supported =
15146 ppd->link_width_downgrade_enabled =
15149 /* ensure num_vls isn't larger than number of sdma engines */
15150 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
15151 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
15152 num_vls, sdma_engines);
15153 num_vls = sdma_engines;
15154 ppd->vls_supported = sdma_engines;
15155 ppd->vls_operational = ppd->vls_supported;
15159 * Convert the ns parameter to the 64 * cclocks used in the CSR.
15160 * Limit the max if larger than the field holds. If timeout is
15161 * non-zero, then the calculated field will be at least 1.
15163 * Must be after icode is set up - the cclock rate depends
15164 * on knowing the hardware being used.
15166 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
15167 if (dd->rcv_intr_timeout_csr >
15168 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
15169 dd->rcv_intr_timeout_csr =
15170 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
15171 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
15172 dd->rcv_intr_timeout_csr = 1;
15174 /* needs to be done before we look for the peer device */
15177 /* set up shared ASIC data with peer device */
15178 ret = init_asic_data(dd);
15182 /* obtain chip sizes, reset chip CSRs */
15183 ret = init_chip(dd);
15187 /* read in the PCIe link speed information */
15188 ret = pcie_speeds(dd);
15192 /* call before get_platform_config(), after init_chip_resources() */
15193 ret = eprom_init(dd);
15195 goto bail_free_rcverr;
15197 /* Needs to be called before hfi1_firmware_init */
15198 get_platform_config(dd);
15200 /* read in firmware */
15201 ret = hfi1_firmware_init(dd);
15206 * In general, the PCIe Gen3 transition must occur after the
15207 * chip has been idled (so it won't initiate any PCIe transactions
15208 * e.g. an interrupt) and before the driver changes any registers
15209 * (the transition will reset the registers).
15211 * In particular, place this call after:
15212 * - init_chip() - the chip will not initiate any PCIe transactions
15213 * - pcie_speeds() - reads the current link speed
15214 * - hfi1_firmware_init() - the needed firmware is ready to be
15217 ret = do_pcie_gen3_transition(dd);
15221 /* start setting dd values and adjusting CSRs */
15222 init_early_variables(dd);
15224 parse_platform_config(dd);
15226 ret = obtain_boardname(dd);
15230 snprintf(dd->boardversion, BOARD_VERS_MAX,
15231 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15232 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15235 (dd->revision >> CCE_REVISION_SW_SHIFT)
15236 & CCE_REVISION_SW_MASK);
15238 ret = set_up_context_variables(dd);
15242 /* set initial RXE CSRs */
15243 ret = init_rxe(dd);
15247 /* set initial TXE CSRs */
15249 /* set initial non-RXE, non-TXE CSRs */
15251 /* set up KDETH QP prefix in both RX and TX CSRs */
15254 ret = hfi1_dev_affinity_init(dd);
15258 /* send contexts must be set up before receive contexts */
15259 ret = init_send_contexts(dd);
15263 ret = hfi1_create_kctxts(dd);
15268 * Initialize aspm, to be done after gen3 transition and setting up
15269 * contexts and before enabling interrupts
15273 ret = init_pervl_scs(dd);
15278 for (i = 0; i < dd->num_pports; ++i) {
15279 ret = sdma_init(dd, i);
15284 /* use contexts created by hfi1_create_kctxts */
15285 ret = set_up_interrupts(dd);
15289 ret = hfi1_comp_vectors_set_up(dd);
15291 goto bail_clear_intr;
15293 /* set up LCB access - must be after set_up_interrupts() */
15294 init_lcb_access(dd);
15297 * Serial number is created from the base guid:
15298 * [27:24] = base guid [38:35]
15299 * [23: 0] = base guid [23: 0]
15301 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15302 (dd->base_guid & 0xFFFFFF) |
15303 ((dd->base_guid >> 11) & 0xF000000));
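/*
 * Worked example of the serial layout above (illustrative GUID):
 * base_guid = 0x1234567890 has bits [23:0] = 0x567890 and bits
 * [38:35] = 0x2; shifting right by 11 places those four bits at
 * [27:24], giving serial "0x02567890".
 */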
15305 dd->oui1 = dd->base_guid >> 56 & 0xFF;
15306 dd->oui2 = dd->base_guid >> 48 & 0xFF;
15307 dd->oui3 = dd->base_guid >> 40 & 0xFF;
15309 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15311 goto bail_clear_intr;
15315 ret = init_cntrs(dd);
15317 goto bail_clear_intr;
15319 ret = init_rcverr(dd);
15321 goto bail_free_cntrs;
15323 init_completion(&dd->user_comp);
15325 /* The user refcount starts with one to indicate an active device */
15326 atomic_set(&dd->user_refcount, 1);
15335 hfi1_comp_vectors_clean_up(dd);
15336 hfi1_clean_up_interrupts(dd);
15338 hfi1_pcie_ddcleanup(dd);
15340 hfi1_free_devdata(dd);
15346 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15350 u32 current_egress_rate = ppd->current_egress_rate;
15351 /* rates here are in units of 10^6 bits/sec */
15353 if (desired_egress_rate == -1)
15354 return 0; /* shouldn't happen */
15356 if (desired_egress_rate >= current_egress_rate)
15357 return 0; /* we can't help it go faster, only slower */
15359 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15360 egress_cycles(dw_len * 4, current_egress_rate);
15362 return (u16)delta_cycles;
15366 * create_pbc - build a pbc for transmission
15367 * @flags: special case flags or-ed in built pbc
15368 * @srate: static rate
15370 * @dwlen: dword length (header words + data words + pbc words)
15372 * Create a PBC with the given flags, rate, VL, and length.
15374 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15375 * for verbs, which does not use this PSM feature. The lone other caller
15376 * is for the diagnostic interface which calls this if the user does not
15377 * supply their own PBC.
15379 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15382 u64 pbc, delay = 0;
15384 if (unlikely(srate_mbs))
15385 delay = delay_cycles(ppd, srate_mbs, dw_len);
15388 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15389 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15390 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15391 | (dw_len & PBC_LENGTH_DWS_MASK)
15392 << PBC_LENGTH_DWS_SHIFT;
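/*
 * Typical usage sketch (a verbs-style caller with no static rate
 * limiting, so srate_mbs = 0 and the delay term stays 0):
 *
 *	pbc = create_pbc(ppd, flags, 0, vl, dwlen);
 *
 * The result packs the flags, a zero rate-control count, "no HCRC",
 * the VL, and the dword length into one 64-bit PBC word.
 */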
15397 #define SBUS_THERMAL 0x4f
15398 #define SBUS_THERM_MONITOR_MODE 0x1
15400 #define THERM_FAILURE(dev, ret, reason) \
15401 dd_dev_err((dd), \
15402 "Thermal sensor initialization failed: %s (%d)\n", \
15403 (reason), (ret))
15406 * Initialize the thermal sensor.
15408 * After initialization, enable polling of the thermal sensor through
15409 * the SBus interface. For this to work, the SBus Master firmware has
15410 * to be loaded, because the HW polling logic uses SBus interrupts,
15411 * which are not supported with the default firmware. Otherwise, no
15412 * data will be returned through the ASIC_STS_THERM CSR.
15415 static int thermal_init(struct hfi1_devdata *dd)
15419 if (dd->icode != ICODE_RTL_SILICON ||
15420 check_chip_resource(dd, CR_THERM_INIT, NULL))
15423 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15425 THERM_FAILURE(dd, ret, "Acquire SBus");
15429 dd_dev_info(dd, "Initializing thermal sensor\n");
15430 /* Disable polling of thermal readings */
15431 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15433 /* Thermal Sensor Initialization */
15434 /* Step 1: Reset the Thermal SBus Receiver */
15435 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15436 RESET_SBUS_RECEIVER, 0);
15438 THERM_FAILURE(dd, ret, "Bus Reset");
15441 /* Step 2: Set Reset bit in Thermal block */
15442 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15443 WRITE_SBUS_RECEIVER, 0x1);
15445 THERM_FAILURE(dd, ret, "Therm Block Reset");
15448 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
15449 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15450 WRITE_SBUS_RECEIVER, 0x32);
15452 THERM_FAILURE(dd, ret, "Write Clock Div");
15455 /* Step 4: Select temperature mode */
15456 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15457 WRITE_SBUS_RECEIVER,
15458 SBUS_THERM_MONITOR_MODE);
15460 THERM_FAILURE(dd, ret, "Write Mode Sel");
15463 /* Step 5: De-assert block reset and start conversion */
15464 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15465 WRITE_SBUS_RECEIVER, 0x2);
15467 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15470 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
15473 /* Enable polling of thermal readings */
15474 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15476 /* Set initialized flag */
15477 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15479 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15482 release_chip_resource(dd, CR_SBUS);
15486 static void handle_temp_err(struct hfi1_devdata *dd)
15488 struct hfi1_pportdata *ppd = &dd->pport[0];
15490 * Thermal Critical Interrupt
15491 * Put the device into forced freeze mode, take link down to
15492 * offline, and put DC into reset.
15495 "Critical temperature reached! Forcing device into freeze mode!\n");
15496 dd->flags |= HFI1_FORCED_FREEZE;
15497 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15499 * Shut DC down as much and as quickly as possible.
15501 * Step 1: Take the link down to OFFLINE. This will cause the
15502 * 8051 to put the Serdes in reset. However, we don't want to
15503 * go through the entire link state machine since we want to
15504 * shut down ASAP. Furthermore, this is not a graceful shutdown
15505 * but rather an attempt to save the chip.
15506 * Code below is almost the same as quiet_serdes() but avoids
15507 * all the extra work and the sleeps.
15509 ppd->driver_link_ready = 0;
15510 ppd->link_enabled = 0;
15511 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15514 * Step 2: Shutdown LCB and 8051
15515 * After shutdown, do not restore DC_CFG_RESET value.