/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file contains all of the code that is specific to the HFI chip
 */
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#define NUM_IB_PORTS 1
uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

static uint loopback; /* loopback mode: 1 = serdes, 3 = external cable */
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */
struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
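/*
 * Illustrative expansion (initializer order follows struct flag_table):
 * FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * becomes { CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }.
 */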
/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define DEFAULT_KRCVQS		2
#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1
/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES		256
#define NUM_MAP_REGS		32
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT	39
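/*
 * Assumed usage, for illustration only (not code from this file): with two
 * HFIs per ASIC, the HFI index can be recovered from a port GUID as
 * (guid >> GUID_HFI_INDEX_SHIFT) & 1.
 */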
/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
#define IB_PACKET_TYPE		2ull
#define QW_SHIFT		6ull
#define QPN_WIDTH		7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull
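/*
 * Illustrative match semantics (editorial sketch): a packet matches when
 * ((qw0 >> LRH_BTH_BIT_OFFSET) & LRH_BTH_MASK) == LRH_BTH_VALUE, i.e. the
 * LRH.LNH field is 2, marking an IB packet with a BTH following the LRH.
 */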
/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
(	\
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)
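/*
 * Illustrative use (made-up mapping, not the actual power-on table):
 * SC2VL_VAL(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 1, 5, 1, 6, 1, 7, 1)
 * builds the 64-bit value for SEND_SC2VLT0 mapping SC0-SC3 to VL0 and
 * SC4-SC7 to VL1.
 */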
#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
(	\
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT)   \
)
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
			| CCE_STATUS_RXE_FROZE_SMASK \
			| CCE_STATUS_TXE_FROZE_SMASK \
			| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
			| CCE_STATUS_TXE_PAUSED_SMASK \
			| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
/*
 * CCE Error flags
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK)
};
/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
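/*
 * Illustrative expansion: MES(CSR_PARITY) becomes
 * MISC_ERR_STATUS_MISC_CSR_PARITY_ERR_SMASK, so each entry below only has
 * to name the unique part of the register field.
 */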
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};
/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};
/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK)
};
/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR  \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC))
};
/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};
/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))
/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved */
};
/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR  \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};
/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};
/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};
/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		EXTERNAL_DEVICE_REQ_TIMEOUT),
};
/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC SMA message", 0x0002),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
};
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
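/*
 * Illustrative expansion: EE(CCE_ERR, handle_cce_err, "CceErr") becomes
 * { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" }.
 * The DC blocks need DC_EE1/DC_EE2 because their CSRs are suffixed
 * _FLG/_FLG_CLR/_FLG_EN (DCC) or _FLG/_CLR/_EN (LCB, 8051) instead of
 * _STATUS/_CLEAR/_MASK.
 */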
/*
 * Table of the "misc" grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR,		handle_cce_err,		"CceErr"),
/* 1*/	EE(RCV_ERR,		handle_rxe_err,		"RxeErr"),
/* 2*/	EE(MISC_ERR,		handle_misc_err,	"MiscErr"),
/* 3*/	{ 0, 0, 0, NULL },	/* reserved */
/* 4*/	EE(SEND_PIO_ERR,	handle_pio_err,		"PioErr"),
/* 5*/	EE(SEND_DMA_ERR,	handle_sdma_err,	"SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR,	handle_egress_err,	"EgressErr"),
/* 7*/	EE(SEND_ERR,		handle_txe_err,		"TxeErr")
	/* the rest are reserved */
};
/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4
/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL },	/* PbcInt */
/* 1*/	{ 0, 0, 0, NULL },	/* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL },	/* TCritInt */
	/* rest are reserved */
};
/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR,		handle_dcc_err,		"DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR,	handle_lcb_err,		"LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR,	handle_8051_interrupt,	"DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};
struct cntr_entry {
	/* counter name */
	char *name;
	/* csr to read for name (if applicable) */
	u64 csr;
	/* offset into dd or ppd to store the counter's value */
	int offset;
	/* flags */
	u8 flags;
	/* accessor for stat element, context either dd or ppd */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}
/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)
#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)
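/*
 * Illustrative expansion: OVR_ELM(0) becomes
 * CNTR_ELEM("RcvHdrOvr0", RCV_HDR_OVFL_CNT + 0 * 0x100, 0, CNTR_NORMAL,
 * port_access_u64_csr), i.e. one table entry per receive context, with the
 * per-context overflow counters spaced 0x100 bytes apart in CSR space.
 */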
/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT) {
		return readq((void __iomem *)dd->kregbase + offset);
	}
	return -1;
}

void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT)
		writeq(value, (void __iomem *)dd->kregbase + offset);
}

void __iomem *get_csr_addr(
	struct hfi1_devdata *dd,
	u32 offset)
{
	return (void __iomem *)dd->kregbase + offset;
}
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
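/*
 * Usage sketch (illustrative): the counter accessors below read a CSR
 * counter with read_write_csr(dd, csr, CNTR_MODE_R, 0) and zero it with
 * read_write_csr(dd, csr, CNTR_MODE_W, 0).
 */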
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}
static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}
/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
	return ret;
}
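/*
 * Note (editorial): unlike the CSR variant above, a CNTR_MODE_W write here
 * stores the caller's value directly into the software counter, e.g.
 * read_write_sw(dd, &ppd->link_up, CNTR_MODE_W, 0) zeroes the counter.
 */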
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}
1508 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1509 void *context, int vl, int mode,
1512 struct hfi1_pportdata *ppd = context;
1514 if (vl != CNTR_INVALID_VL)
1517 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1521 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1522 void *context, int vl, int mode, u64 data)
1524 struct hfi1_pportdata *ppd = context;
1526 if (vl != CNTR_INVALID_VL)
1529 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
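/*
 * Per-CPU counters are never cleared in place. Instead, a zero value
 * (*z_val) records the total at the time of the last "zeroing" write,
 * and reads report the current total minus that baseline. Writes may
 * only carry the value 0; anything else is rejected below.
 */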
static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}
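/*
 * What follows is a long run of near-identical accessors, one per error
 * status bit in each hardware error register. Each simply reports the
 * software shadow count kept in the matching per-device array; the
 * array index is the bit position of the error within its *ErrStatus
 * register (e.g. bit 12 of MISC_ERR_STATUS -> misc_err_status_cnt[12]).
 */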
/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl, int mode,
						u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[28];
}

static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[27];
}

static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[26];
}

static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[25];
}

static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[24];
}

static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[23];
}

static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[22];
}

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}
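/*
 * The egress group below also fans out per resource: one accessor per
 * launch FIFO (fifo0-fifo8) and one per SDMA engine (sdma0-sdma15) for
 * the disallowed-packet errors, each still just indexing
 * send_egress_err_status_cnt[] by the corresponding status bit.
 */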
/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[63];
}

static u64 access_tx_read_sdma_memory_csr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[62];
}

static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[61];
}

static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[60];
}

static u64 access_tx_read_sdma_memory_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[59];
}

static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[58];
}

static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[57];
}

static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[56];
}

static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[55];
}

static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[54];
}

static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[53];
}

static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[52];
}

static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[51];
}

static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[50];
}

static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[49];
}

static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[48];
}

static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[47];
}

static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[46];
}

static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[45];
}

static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[44];
}

static u64 access_tx_read_sdma_memory_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[43];
}

static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[42];
}

static u64 access_tx_credit_return_partiy_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[41];
}

static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[40];
}

static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[39];
}

static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[38];
}

static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[37];
}

static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[36];
}

static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[35];
}

static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[34];
}

static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[33];
}

static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[32];
}

static u64 access_tx_sdma15_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[31];
}

static u64 access_tx_sdma14_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[30];
}

static u64 access_tx_sdma13_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[29];
}

static u64 access_tx_sdma12_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[28];
}

static u64 access_tx_sdma11_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[27];
}

static u64 access_tx_sdma10_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[26];
}

static u64 access_tx_sdma9_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[25];
}

static u64 access_tx_sdma8_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[24];
}

static u64 access_tx_sdma7_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
                                const struct cntr_entry *entry,
                                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
                                const struct cntr_entry *entry,
                                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
                                const struct cntr_entry *entry,
                                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
                                const struct cntr_entry *entry,
                                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
                                const struct cntr_entry *entry,
                                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
                                const struct cntr_entry *entry,
                                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
                                       void *context, int vl, int mode,
                                       u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
                                             void *context, int vl, int mode,
                                             u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
                                             void *context, int vl, int mode,
                                             u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
                                          void *context, int vl, int mode,
                                          u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
                                const struct cntr_entry *entry,
                                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
                                               void *context, int vl,
                                               int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
                                    void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
                                        void *context, int vl, int mode,
                                        u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
                                          void *context, int vl, int mode,
                                          u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
                                const struct cntr_entry *entry,
                                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
                                        void *context, int vl, int mode,
                                        u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
                                        void *context, int vl, int mode,
                                        u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[0];
}
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
                                 void *context, int vl, int mode,
                                 u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        u64 val = 0;
        u64 csr = entry->csr;

        val = read_write_csr(dd, csr, mode, data);
        if (mode == CNTR_MODE_R) {
                val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
                        CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
        } else if (mode == CNTR_MODE_W) {
                dd->sw_rcv_bypass_packet_errors = 0;
        } else {
                dd_dev_err(dd, "Invalid cntr register access mode");
                return 0;
        }
        return val;
}
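/*
 * Worked example of the read path above (values illustrative only): if
 * the error-count CSR reads CNTR_MAX - 5 while
 * sw_rcv_bypass_packet_errors holds 10, the combined count is clamped
 * to CNTR_MAX rather than wrapping.  A write (CNTR_MODE_W) is passed
 * through to the CSR by read_write_csr() and additionally zeroes the
 * software-counted portion.
 */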
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
                                void *context, int vl, int mode, u64 data)    \
{                                                                             \
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
        return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
                              ppd->ibport_data.rvp.cntr, vl,                  \
                              mode, data);                                    \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
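/*
 * For reference, def_access_sw_cpu(rc_acks) above expands to the
 * equivalent of:
 *
 *      static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *                                       void *context, int vl, int mode,
 *                                       u64 data)
 *      {
 *              struct hfi1_pportdata *ppd =
 *                      (struct hfi1_pportdata *)context;
 *              return read_write_cpu(ppd->dd,
 *                                    &ppd->ibport_data.rvp.z_rc_acks,
 *                                    ppd->ibport_data.rvp.rc_acks, vl,
 *                                    mode, data);
 *      }
 */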
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
                             void *context, int vl, int mode, u64 data)       \
{                                                                             \
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
                                                                              \
        if (vl != CNTR_INVALID_VL)                                            \
                return 0;                                                     \
                                                                              \
        return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
                             mode, data);                                     \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
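/*
 * Each def_access_ibp_counter(cntr) use above generates an accessor
 * named access_ibp_<cntr>.  Unlike the per-CPU counters, these are
 * port-wide software counters with no per-VL breakdown, so any query
 * with vl != CNTR_INVALID_VL simply reads back 0.
 */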
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
                        CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
                        CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
                        RCV_TID_FLOW_GEN_MISMATCH_CNT,
                        CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
                        CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
                        RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
                        CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
                        CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
                        CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
                        CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
                        CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
                        CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
                        CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
                        CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
                        CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
                        CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
                        access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
                        CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
                        CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
                        CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
                        DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
                        DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
                        CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
                        DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
                        CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
                        CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
                        CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
                        CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
                        CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
                        CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
                        CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
                        CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
                        CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
                        CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
                        CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
                        CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
                        CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
                        CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
                        CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
        DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
                         CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
                        CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
                        CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
                        CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
                        CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
        DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
                         CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
                        CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
                        CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
        DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
                         CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
        DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
                         CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
        DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
                         CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
        DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
                         CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
        DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
                         CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
                        CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
        DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
                         CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
        DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
        DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
                         CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
        DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
        DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
        DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
        DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
        DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
                         CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
        DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
        DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
                         CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
                        access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
                        access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
                        access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
                        access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
                        access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
                        access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
                        access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
                        SEND_DMA_DESC_FETCHED_CNT, 0,
                        CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                        dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
                        CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                        access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
                        CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                        access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
                        CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                        access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
                        CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                        access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
                        0, CNTR_NORMAL,
                        access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
                        CNTR_NORMAL,
                        access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
                        CNTR_NORMAL,
                        access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
                        0, CNTR_NORMAL,
                        access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
                        0, CNTR_NORMAL,
                        access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
                        CNTR_NORMAL,
                        access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
                        0, CNTR_NORMAL,
                        access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
                        "CceCli1AsyncFifoDbgParityError", 0, 0,
                        CNTR_NORMAL,
                        access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
                        "CceCli1AsyncFifoRxdmaParityError", 0, 0,
                        CNTR_NORMAL,
                        access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
                        "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
                        "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
                        0, CNTR_NORMAL,
                        access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
                        0, CNTR_NORMAL,
                        access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
                        0, CNTR_NORMAL,
                        access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
                        CNTR_NORMAL,
                        access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
                        0, CNTR_NORMAL,
                        access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
                        0, CNTR_NORMAL,
                        access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
                        "RxRbufFlInitWrAddrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
                        0, CNTR_NORMAL,
                        access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
                        0, CNTR_NORMAL,
                        access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
                        CNTR_NORMAL,
                        access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
                        "RxRbufCsrQNumOfPktParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
                        "RxRbufCsrQTlPtrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
                        0, CNTR_NORMAL,
                        access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
                        0, CNTR_NORMAL,
                        access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
                        0, 0, CNTR_NORMAL,
                        access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
                        0, CNTR_NORMAL,
                        access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
                        "RxRbufCsrQHeadBufNumParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
                        0, CNTR_NORMAL,
                        access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
                        0, CNTR_NORMAL,
                        access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
                        "RxRbufLookupDesRegUncCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
                        0, 0, CNTR_NORMAL,
                        access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
                        0, CNTR_NORMAL,
                        access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
                        CNTR_NORMAL,
                        access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
                        CNTR_NORMAL,
                        access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
                        0, CNTR_NORMAL,
                        access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
                        "PioSbrdctrlCrrelFifoParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
                        CNTR_NORMAL,
                        access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
                        CNTR_NORMAL,
                        access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
                        0, CNTR_NORMAL,
                        access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
                        0, CNTR_NORMAL,
                        access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
                        0, CNTR_NORMAL,
                        access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_VL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
                        "TxSbrdCtlStateMachineParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
                        CNTR_NORMAL,
                        access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
                        CNTR_NORMAL,
                        access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
                        0, 0, CNTR_NORMAL,
                        access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
                        CNTR_NORMAL,
                        access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
                        "EgressFifoUnderrunOrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
                        CNTR_NORMAL,
                        access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
                        CNTR_NORMAL,
                        access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
                        CNTR_NORMAL,
                        access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
                        CNTR_NORMAL,
                        access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
                        0, 0, CNTR_NORMAL,
                        access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
                        CNTR_NORMAL,
                        access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
                        0, 0, CNTR_NORMAL,
                        access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
                        0, 0, CNTR_NORMAL,
                        access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
                        CNTR_NORMAL,
                        access_sdma_wrong_dw_err_cnt),
};
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
                        CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
                        CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
                        CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
                        CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
                        CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
                        CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
                        CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
                        CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
                        CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
                        CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
                        access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
                        access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
                        access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
                        access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
                        CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
                        access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
                        access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
                        access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
                        access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
                        access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
                        access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
/* ========================================================================= */

/* return true if this is chip revision a */
int is_ax(struct hfi1_devdata *dd)
{
        u8 chip_rev_minor =
                dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
        return (chip_rev_minor & 0xf0) == 0;
}

/* return true if this is chip revision b */
int is_bx(struct hfi1_devdata *dd)
{
        u8 chip_rev_minor =
                dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
        return (chip_rev_minor & 0xF0) == 0x10;
}
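/*
 * Example: with the masks above, a minor revision field of 0x00-0x0f
 * identifies an A-step part (is_ax() returns true) and 0x10-0x1f a
 * B-step part (is_bx() returns true); the low nibble is left to
 * distinguish individual steppings within a revision.
 */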
5140 * Append string s to buffer buf. Arguments curp and len are the current
5141 * position and remaining length, respectively.
5143 * return 0 on success, 1 on out of room
5145 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5149 int result = 0; /* success */
5152 /* add a comma, if first in the buffer */
5155 result = 1; /* out of room */
5162 /* copy the string */
5163 while ((c = *s++) != 0) {
5165 result = 1; /* out of room */
5173 /* write return values */
5181 * Using the given flag table, print a comma separated string into
5182 * the buffer. End in '*' if the buffer is too short.
5184 static char *flag_string(char *buf, int buf_len, u64 flags,
5185 struct flag_table *table, int table_size)
5193 /* make sure there is at least 2 so we can form "*" */
5197 len--; /* leave room for a nul */
5198 for (i = 0; i < table_size; i++) {
5199 if (flags & table[i].flag) {
5200 no_room = append_str(buf, &p, &len, table[i].str);
5203 flags &= ~table[i].flag;
5207 /* any undocumented bits left? */
5208 if (!no_room && flags) {
5209 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5210 no_room = append_str(buf, &p, &len, extra);
5213 /* add a '*' if we ran out of room */
5215 /* may need to back up to add space for a '*' */
5221 /* add final nul - space already allocated above */
5222 *p = 0;
5223 return buf;
5224 }
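/*
 * Usage sketch (illustrative only - the table and flag values below are
 * hypothetical, not a real error table from this driver):
 */
#if 0
static struct flag_table demo_flags[] = {
	{ 0x1ull, "FirstErr" },
	{ 0x2ull, "SecondErr" },
};

static void demo_flag_string(void)
{
	char buf[64];

	/* 0x7 formats as "FirstErr,SecondErr,bits 0x4"; bit 2 is undocumented */
	flag_string(buf, sizeof(buf), 0x7, demo_flags, ARRAY_SIZE(demo_flags));
}
#endif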
5226 /* first 8 CCE error interrupt source names */
5227 static const char * const cce_misc_names[] = {
5228 "CceErrInt", /* 0 */
5229 "RxeErrInt", /* 1 */
5230 "MiscErrInt", /* 2 */
5231 "Reserved3", /* 3 */
5232 "PioErrInt", /* 4 */
5233 "SDmaErrInt", /* 5 */
5234 "EgressErrInt", /* 6 */
5235 "TxeErrInt" /* 7 */
5236 };
5239 * Return the miscellaneous error interrupt name.
5241 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5243 if (source < ARRAY_SIZE(cce_misc_names))
5244 strncpy(buf, cce_misc_names[source], bsize);
5246 snprintf(buf, bsize, "Reserved%u",
5247 source + IS_GENERAL_ERR_START);
5253 * Return the SDMA engine error interrupt name.
5255 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5257 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5262 * Return the send context error interrupt name.
5264 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5266 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5270 static const char * const various_names[] = {
5271 "PbcInt",
5272 "GpioAssertInt",
5273 "Qsfp1Int",
5274 "Qsfp2Int",
5275 "TCritInt"
5276 };
5279 * Return the "various" interrupt name.
5281 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5283 if (source < ARRAY_SIZE(various_names))
5284 strncpy(buf, various_names[source], bsize);
5286 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5291 * Return the DC interrupt name.
5293 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5295 static const char * const dc_int_names[] = {
5296 "common",
5297 "lcb",
5298 "8051",
5299 "lbm" /* local block merge */
5302 if (source < ARRAY_SIZE(dc_int_names))
5303 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5305 snprintf(buf, bsize, "DCInt%u", source);
5309 static const char * const sdma_int_names[] = {
5310 "SDmaInt",
5311 "SDmaIdleInt",
5312 "SDmaProgressInt",
5313 };
5316 * Return the SDMA engine interrupt name.
5318 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5320 /* what interrupt */
5321 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5323 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5325 if (likely(what < 3))
5326 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5328 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
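/*
 * Worked example: with TXE_NUM_SDMA_ENGINES == 16, source 35 decodes as
 * what == 2 and which == 3, i.e. "SDmaProgressInt3"; any source at or
 * beyond 3 * TXE_NUM_SDMA_ENGINES falls into the invalid case.
 */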
5333 * Return the receive available interrupt name.
5335 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5337 snprintf(buf, bsize, "RcvAvailInt%u", source);
5342 * Return the receive urgent interrupt name.
5344 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5346 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5351 * Return the send credit interrupt name.
5353 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5355 snprintf(buf, bsize, "SendCreditInt%u", source);
5360 * Return the reserved interrupt name.
5362 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5364 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5368 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5370 return flag_string(buf, buf_len, flags,
5371 cce_err_status_flags,
5372 ARRAY_SIZE(cce_err_status_flags));
5375 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5377 return flag_string(buf, buf_len, flags,
5378 rxe_err_status_flags,
5379 ARRAY_SIZE(rxe_err_status_flags));
5382 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5384 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5385 ARRAY_SIZE(misc_err_status_flags));
5388 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5390 return flag_string(buf, buf_len, flags,
5391 pio_err_status_flags,
5392 ARRAY_SIZE(pio_err_status_flags));
5395 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5397 return flag_string(buf, buf_len, flags,
5398 sdma_err_status_flags,
5399 ARRAY_SIZE(sdma_err_status_flags));
5402 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5404 return flag_string(buf, buf_len, flags,
5405 egress_err_status_flags,
5406 ARRAY_SIZE(egress_err_status_flags));
5409 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5411 return flag_string(buf, buf_len, flags,
5412 egress_err_info_flags,
5413 ARRAY_SIZE(egress_err_info_flags));
5416 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5418 return flag_string(buf, buf_len, flags,
5419 send_err_status_flags,
5420 ARRAY_SIZE(send_err_status_flags));
5423 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5429 * For most of these errors, there is nothing that can be done except
5430 * report or record it.
5432 dd_dev_info(dd, "CCE Error: %s\n",
5433 cce_err_status_string(buf, sizeof(buf), reg));
5435 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5436 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5437 /* this error requires a manual drop into SPC freeze mode */
5439 start_freeze_handling(dd->pport, FREEZE_SELF);
5442 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5443 if (reg & (1ull << i)) {
5444 incr_cntr64(&dd->cce_err_status_cnt[i]);
5445 /* maintain a counter over all cce_err_status errors */
5446 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5452 * Check counters for receive errors that do not have an interrupt
5453 * associated with them.
5455 #define RCVERR_CHECK_TIME 10
5456 static void update_rcverr_timer(unsigned long opaque)
5458 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5459 struct hfi1_pportdata *ppd = dd->pport;
5460 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5462 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5463 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5464 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5465 set_link_down_reason(
5466 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5467 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5468 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5470 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5472 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5475 static int init_rcverr(struct hfi1_devdata *dd)
5477 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5478 /* Assume the hardware counter has been reset */
5479 dd->rcv_ovfl_cnt = 0;
5480 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5483 static void free_rcverr(struct hfi1_devdata *dd)
5485 if (dd->rcverr_timer.data)
5486 del_timer_sync(&dd->rcverr_timer);
5487 dd->rcverr_timer.data = 0;
5490 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5495 dd_dev_info(dd, "Receive Error: %s\n",
5496 rxe_err_status_string(buf, sizeof(buf), reg));
5498 if (reg & ALL_RXE_FREEZE_ERR) {
5502 * Freeze mode recovery is disabled for the errors
5503 * in RXE_FREEZE_ABORT_MASK
5505 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5506 flags = FREEZE_ABORT;
5508 start_freeze_handling(dd->pport, flags);
5511 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5512 if (reg & (1ull << i))
5513 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5517 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5522 dd_dev_info(dd, "Misc Error: %s",
5523 misc_err_status_string(buf, sizeof(buf), reg));
5524 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5525 if (reg & (1ull << i))
5526 incr_cntr64(&dd->misc_err_status_cnt[i]);
5530 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5535 dd_dev_info(dd, "PIO Error: %s\n",
5536 pio_err_status_string(buf, sizeof(buf), reg));
5538 if (reg & ALL_PIO_FREEZE_ERR)
5539 start_freeze_handling(dd->pport, 0);
5541 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5542 if (reg & (1ull << i))
5543 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5547 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5552 dd_dev_info(dd, "SDMA Error: %s\n",
5553 sdma_err_status_string(buf, sizeof(buf), reg));
5555 if (reg & ALL_SDMA_FREEZE_ERR)
5556 start_freeze_handling(dd->pport, 0);
5558 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5559 if (reg & (1ull << i))
5560 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5564 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5566 incr_cntr64(&ppd->port_xmit_discards);
5569 static void count_port_inactive(struct hfi1_devdata *dd)
5571 __count_port_discards(dd->pport);
5575 * We have had a "disallowed packet" error during egress. Determine the
5576 * integrity check which failed, and update relevant error counter, etc.
5578 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5579 * bit of state per integrity check, and so we can miss the reason for an
5580 * egress error if more than one packet fails the same integrity check
5581 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5583 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5586 struct hfi1_pportdata *ppd = dd->pport;
5587 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5588 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5591 /* clear down all observed info as quickly as possible after read */
5592 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5595 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5596 info, egress_err_info_string(buf, sizeof(buf), info), src);
5598 /* Eventually add other counters for each bit */
5599 if (info & PORT_DISCARD_EGRESS_ERRS) {
5603 * Count all applicable bits as individual errors and
5604 * attribute them to the packet that triggered this handler.
5605 * This may not be completely accurate due to limitations
5606 * on the available hardware error information. There is
5607 * a single information register and any number of error
5608 * packets may have occurred and contributed to it before
5609 * this routine is called. This means that:
5610 * a) If multiple packets with the same error occur before
5611 * this routine is called, earlier packets are missed.
5612 * There is only a single bit for each error type.
5613 * b) Errors may not be attributed to the correct VL.
5614 * The driver is attributing all bits in the info register
5615 * to the packet that triggered this call, but bits
5616 * could be an accumulation of different packets with
5617 * different VLs.
5618 * c) A single error packet may have multiple counts attached
5619 * to it. There is no way for the driver to know if
5620 * multiple bits set in the info register are due to a
5621 * single packet or multiple packets. The driver assumes
5622 * multiple packets.
5623 */
5624 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5625 for (i = 0; i < weight; i++) {
5626 __count_port_discards(ppd);
5627 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5628 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5629 else if (vl == 15)
5630 incr_cntr64(&ppd->port_xmit_discards_vl
5631 [C_VL_15]);
5632 }
5633 }
5634 }
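/*
 * Worked example: if the info register has three of the
 * PORT_DISCARD_EGRESS_ERRS bits set, weight is 3 and three discards are
 * counted, all attributed to the one VL passed in - even though they may
 * have come from different packets (see the caveats above).
 */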
5637 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5638 * register. Does it represent a 'port inactive' error?
5640 static inline int port_inactive_err(u64 posn)
5642 return (posn >= SEES(TX_LINKDOWN) &&
5643 posn <= SEES(TX_INCORRECT_LINK_STATE));
5647 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5648 * register. Does it represent a 'disallowed packet' error?
5650 static inline int disallowed_pkt_err(int posn)
5652 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5653 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5657 * Input value is a bit position of one of the SDMA engine disallowed
5658 * packet errors. Return which engine. Use of this must be guarded by
5659 * disallowed_pkt_err().
5661 static inline int disallowed_pkt_engine(int posn)
5663 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5667 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5668 * be done.
5669 */
5670 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5672 struct sdma_vl_map *m;
5676 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5680 m = rcu_dereference(dd->sdma_map);
5681 vl = m->engine_to_vl[engine];
5688 * Translate the send context (software index) into a VL. Return -1 if the
5689 * translation cannot be done.
5691 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5693 struct send_context_info *sci;
5694 struct send_context *sc;
5697 sci = &dd->send_contexts[sw_index];
5699 /* there is no information for user (PSM) and ack contexts */
5700 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5706 if (dd->vld[15].sc == sc)
5708 for (i = 0; i < num_vls; i++)
5709 if (dd->vld[i].sc == sc)
5715 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5717 u64 reg_copy = reg, handled = 0;
5721 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5722 start_freeze_handling(dd->pport, 0);
5723 else if (is_ax(dd) &&
5724 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5725 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5726 start_freeze_handling(dd->pport, 0);
5729 int posn = fls64(reg_copy);
5730 /* fls64() returns a 1-based offset, we want it zero based */
5731 int shift = posn - 1;
5732 u64 mask = 1ULL << shift;
5734 if (port_inactive_err(shift)) {
5735 count_port_inactive(dd);
5737 } else if (disallowed_pkt_err(shift)) {
5738 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5740 handle_send_egress_err_info(dd, vl);
5749 dd_dev_info(dd, "Egress Error: %s\n",
5750 egress_err_status_string(buf, sizeof(buf), reg));
5752 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5753 if (reg & (1ull << i))
5754 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5758 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5763 dd_dev_info(dd, "Send Error: %s\n",
5764 send_err_status_string(buf, sizeof(buf), reg));
5766 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5767 if (reg & (1ull << i))
5768 incr_cntr64(&dd->send_err_status_cnt[i]);
5773 * The maximum number of times the error clear down will loop before
5774 * blocking a repeating error. This value is arbitrary.
5776 #define MAX_CLEAR_COUNT 20
5779 * Clear and handle an error register. All error interrupts are funneled
5780 * through here to have a central location to correctly handle single-
5781 * or multi-shot errors.
5783 * For non per-context registers, call this routine with a context value
5784 * of 0 so the per-context offset is zero.
5786 * If the handler loops too many times, assume that something is wrong
5787 * and can't be fixed, so mask the error bits.
5789 static void interrupt_clear_down(struct hfi1_devdata *dd,
5791 const struct err_reg_info *eri)
5796 /* read in a loop until no more errors are seen */
5799 reg = read_kctxt_csr(dd, context, eri->status);
5802 write_kctxt_csr(dd, context, eri->clear, reg);
5803 if (likely(eri->handler))
5804 eri->handler(dd, context, reg);
5806 if (count > MAX_CLEAR_COUNT) {
5809 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5812 * Read-modify-write so any other masked bits
5815 mask = read_kctxt_csr(dd, context, eri->mask);
5817 write_kctxt_csr(dd, context, eri->mask, mask);
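/*
 * Illustrative note: every second-tier error register handled in this
 * file funnels through interrupt_clear_down() via an err_reg_info entry
 * whose status, clear and mask fields name the three related CSRs and
 * whose handler decodes the bits (e.g. the CCE entry dispatches to
 * handle_cce_err() above).
 */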
5824 * CCE block "misc" interrupt. Source is < 16.
5826 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5828 const struct err_reg_info *eri = &misc_errs[source];
5831 interrupt_clear_down(dd, 0, eri);
5833 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5838 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5840 return flag_string(buf, buf_len, flags,
5841 sc_err_status_flags,
5842 ARRAY_SIZE(sc_err_status_flags));
5846 * Send context error interrupt. Source (hw_context) is < 160.
5848 * All send context errors cause the send context to halt. The normal
5849 * clear-down mechanism cannot be used because we cannot clear the
5850 * error bits until several other long-running items are done first.
5851 * This is OK because with the context halted, nothing else is going
5852 * to happen on it anyway.
5854 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5855 unsigned int hw_context)
5857 struct send_context_info *sci;
5858 struct send_context *sc;
5864 sw_index = dd->hw_to_sw[hw_context];
5865 if (sw_index >= dd->num_send_contexts) {
5867 "out of range sw index %u for send context %u\n",
5868 sw_index, hw_context);
5871 sci = &dd->send_contexts[sw_index];
5874 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5875 sw_index, hw_context);
5879 /* tell the software that a halt has begun */
5880 sc_stop(sc, SCF_HALTED);
5882 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5884 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5885 send_context_err_status_string(flags, sizeof(flags),
5888 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5889 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5892 * Automatically restart halted kernel contexts out of interrupt
5893 * context. User contexts must ask the driver to restart the context.
5895 if (sc->type != SC_USER)
5896 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5899 * Update the counters for the corresponding status bits.
5900 * Note that these particular counters are aggregated over all
5903 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5904 if (status & (1ull << i))
5905 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5909 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5910 unsigned int source, u64 status)
5912 struct sdma_engine *sde;
5915 sde = &dd->per_sdma[source];
5916 #ifdef CONFIG_SDMA_VERBOSITY
5917 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5918 slashstrip(__FILE__), __LINE__, __func__);
5919 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5920 sde->this_idx, source, (unsigned long long)status);
5923 sdma_engine_error(sde, status);
5926 * Update the counters for the corresponding status bits.
5927 * Note that these particular counters are aggregated over
5928 * all 16 DMA engines.
5930 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5931 if (status & (1ull << i))
5932 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5937 * CCE block SDMA error interrupt. Source is < 16.
5939 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5941 #ifdef CONFIG_SDMA_VERBOSITY
5942 struct sdma_engine *sde = &dd->per_sdma[source];
5944 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5945 slashstrip(__FILE__), __LINE__, __func__);
5946 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5948 sdma_dumpstate(sde);
5950 interrupt_clear_down(dd, source, &sdma_eng_err);
5954 * CCE block "various" interrupt. Source is < 8.
5956 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5958 const struct err_reg_info *eri = &various_err[source];
5961 * TCritInt cannot go through interrupt_clear_down()
5962 * because it is not a second tier interrupt. The handler
5963 * should be called directly.
5965 if (source == TCRIT_INT_SOURCE)
5966 handle_temp_err(dd);
5967 else if (eri->handler)
5968 interrupt_clear_down(dd, 0, eri);
5971 "%s: Unimplemented/reserved interrupt %d\n",
5975 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5977 /* src_ctx is always zero */
5978 struct hfi1_pportdata *ppd = dd->pport;
5979 unsigned long flags;
5980 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5982 if (reg & QSFP_HFI0_MODPRST_N) {
5983 if (!qsfp_mod_present(ppd)) {
5984 dd_dev_info(dd, "%s: QSFP module removed\n",
5987 ppd->driver_link_ready = 0;
5989 * Cable removed, reset all our information about the
5990 * cache and cable capabilities
5993 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5995 * We don't set cache_refresh_required here as we expect
5996 * an interrupt when a cable is inserted
5998 ppd->qsfp_info.cache_valid = 0;
5999 ppd->qsfp_info.reset_needed = 0;
6000 ppd->qsfp_info.limiting_active = 0;
6001 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6003 /* Invert the ModPresent pin now to detect plug-in */
6004 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6005 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6007 if ((ppd->offline_disabled_reason >
6009 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6010 (ppd->offline_disabled_reason ==
6011 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6012 ppd->offline_disabled_reason =
6014 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6016 if (ppd->host_link_state == HLS_DN_POLL) {
6018 * The link is still in POLL. This means
6019 * that the normal link down processing
6020 * will not happen. We have to do it here
6021 * before turning the DC off.
6023 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
6026 dd_dev_info(dd, "%s: QSFP module inserted\n",
6029 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6030 ppd->qsfp_info.cache_valid = 0;
6031 ppd->qsfp_info.cache_refresh_required = 1;
6032 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6036 * Stop inversion of ModPresent pin to detect
6037 * removal of the cable
6039 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6040 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6041 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6043 ppd->offline_disabled_reason =
6044 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6048 if (reg & QSFP_HFI0_INT_N) {
6049 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6051 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6052 ppd->qsfp_info.check_interrupt_flags = 1;
6053 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6056 /* Schedule the QSFP work only if there is a cable attached. */
6057 if (qsfp_mod_present(ppd))
6058 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6061 static int request_host_lcb_access(struct hfi1_devdata *dd)
6065 ret = do_8051_command(dd, HCMD_MISC,
6066 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6067 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6068 if (ret != HCMD_SUCCESS) {
6069 dd_dev_err(dd, "%s: command failed with error %d\n",
6072 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6075 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6079 ret = do_8051_command(dd, HCMD_MISC,
6080 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6081 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6082 if (ret != HCMD_SUCCESS) {
6083 dd_dev_err(dd, "%s: command failed with error %d\n",
6086 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6090 * Set the LCB selector - allow host access. The DCC selector always
6091 * points to the host.
6093 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6095 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6096 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6097 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6101 * Clear the LCB selector - allow 8051 access. The DCC selector always
6102 * points to the host.
6104 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6106 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6107 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6111 * Acquire LCB access from the 8051. If the host already has access,
6112 * just increment a counter. Otherwise, inform the 8051 that the
6113 * host is taking access.
6117 * -EBUSY if the 8051 has control and cannot be disturbed
6118 * -errno if unable to acquire access from the 8051
6120 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6122 struct hfi1_pportdata *ppd = dd->pport;
6126 * Use the host link state lock so the operation of this routine
6127 * { link state check, selector change, count increment } can occur
6128 * as a unit against a link state change. Otherwise there is a
6129 * race between the state change and the count increment.
6131 if (sleep_ok) {
6132 mutex_lock(&ppd->hls_lock);
6133 } else {
6134 while (!mutex_trylock(&ppd->hls_lock))
6135 udelay(1);
6136 }
6138 /* this access is valid only when the link is up */
6139 if (ppd->host_link_state & HLS_DOWN) {
6140 dd_dev_info(dd, "%s: link state %s not up\n",
6141 __func__, link_state_name(ppd->host_link_state));
6146 if (dd->lcb_access_count == 0) {
6147 ret = request_host_lcb_access(dd);
6150 "%s: unable to acquire LCB access, err %d\n",
6154 set_host_lcb_access(dd);
6156 dd->lcb_access_count++;
6158 mutex_unlock(&ppd->hls_lock);
6163 * Release LCB access by decrementing the use count. If the count is moving
6164 * from 1 to 0, inform 8051 that it has control back.
6168 * -errno if unable to release access to the 8051
6170 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6175 * Use the host link state lock because the acquire needed it.
6176 * Here, we only need to keep { selector change, count decrement }
6179 if (sleep_ok) {
6180 mutex_lock(&dd->pport->hls_lock);
6181 } else {
6182 while (!mutex_trylock(&dd->pport->hls_lock))
6183 udelay(1);
6184 }
6186 if (dd->lcb_access_count == 0) {
6187 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6192 if (dd->lcb_access_count == 1) {
6193 set_8051_lcb_access(dd);
6194 ret = request_8051_lcb_access(dd);
6197 "%s: unable to release LCB access, err %d\n",
6199 /* restore host access if the grant didn't work */
6200 set_host_lcb_access(dd);
6204 dd->lcb_access_count--;
6206 mutex_unlock(&dd->pport->hls_lock);
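/*
 * Usage sketch (illustrative only, not a call site from this driver):
 * host-side LCB CSR reads are bracketed by acquire/release so that
 * ownership returns to the 8051 afterwards.
 */
#if 0
	u64 reg;

	if (acquire_lcb_access(dd, 1) == 0) {
		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
		release_lcb_access(dd, 1);
	}
#endif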
6211 * Initialize LCB access variables and state. Called during driver load,
6212 * after most of the initialization is finished.
6214 * The DC default is LCB access on for the host. The driver defaults to
6215 * leaving access to the 8051. Assign access now - this constrains the call
6216 * to this routine to be after all LCB set-up is done. In particular, after
6217 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6219 static void init_lcb_access(struct hfi1_devdata *dd)
6221 dd->lcb_access_count = 0;
6225 * Write a response back to an 8051 request.
6227 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6229 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6230 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6232 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6233 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
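/*
 * Worked example: hreq_response(dd, HREQ_SUCCESS, 0) sets the COMPLETED
 * bit and places the return code in DC_DC8051_CFG_EXT_DEV_0, the
 * register the 8051 watches for the host's answer.
 */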
6237 * Handle host requests from the 8051.
6239 static void handle_8051_request(struct hfi1_pportdata *ppd)
6241 struct hfi1_devdata *dd = ppd->dd;
6246 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6247 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6248 return; /* no request */
6250 /* zero out COMPLETED so the response is seen */
6251 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6253 /* extract request details */
6254 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6255 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6256 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6257 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6260 case HREQ_LOAD_CONFIG:
6261 case HREQ_SAVE_CONFIG:
6262 case HREQ_READ_CONFIG:
6263 case HREQ_SET_TX_EQ_ABS:
6264 case HREQ_SET_TX_EQ_REL:
6266 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6268 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6270 case HREQ_CONFIG_DONE:
6271 hreq_response(dd, HREQ_SUCCESS, 0);
6274 case HREQ_INTERFACE_TEST:
6275 hreq_response(dd, HREQ_SUCCESS, data);
6278 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6279 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6284 static void write_global_credit(struct hfi1_devdata *dd,
6285 u8 vau, u16 total, u16 shared)
6287 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6289 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6291 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6292 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6296 * Set up initial VL15 credits of the remote. Assumes the rest of
6297 * the CM credit registers are zero from a previous global or credit reset.
6299 void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6301 /* leave shared count at zero for both global and VL15 */
6302 write_global_credit(dd, vau, vl15buf, 0);
6304 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6305 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6309 * Zero all credit details from the previous connection and
6310 * reset the CM manager's internal counters.
6312 void reset_link_credits(struct hfi1_devdata *dd)
6316 /* remove all previous VL credit limits */
6317 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6318 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6319 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6320 write_global_credit(dd, 0, 0, 0);
6321 /* reset the CM block */
6322 pio_send_control(dd, PSC_CM_RESET);
6325 /* convert a vCU to a CU */
6326 static u32 vcu_to_cu(u8 vcu)
6327 {
6328 return 1 << vcu;
6329 }
6331 /* convert a CU to a vCU */
6332 static u8 cu_to_vcu(u32 cu)
6333 {
6334 return ilog2(cu);
6335 }
6337 /* convert a vAU to an AU */
6338 static u32 vau_to_au(u8 vau)
6339 {
6340 return 8 * (1 << vau);
6341 }
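/*
 * Worked examples: vau_to_au(0) == 8, vau_to_au(1) == 16 and
 * vau_to_au(3) == 64 bytes per allocation unit; vcu_to_cu(2) == 4
 * credits per credit unit, and cu_to_vcu(4) == 2 inverts it.
 */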
6343 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6345 ppd->sm_trap_qp = 0x0;
6350 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6352 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6356 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6357 write_csr(dd, DC_LCB_CFG_RUN, 0);
6358 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6359 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6360 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6361 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6362 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6363 reg = read_csr(dd, DCC_CFG_RESET);
6364 write_csr(dd, DCC_CFG_RESET, reg |
6365 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6366 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6367 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6369 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6370 write_csr(dd, DCC_CFG_RESET, reg);
6371 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6376 * This routine should be called after the link has been transitioned to
6377 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6378 * reset).
6379 *
6380 * The expectation is that the caller of this routine would have taken
6381 * care of properly transitioning the link into the correct state.
6383 static void dc_shutdown(struct hfi1_devdata *dd)
6385 unsigned long flags;
6387 spin_lock_irqsave(&dd->dc8051_lock, flags);
6388 if (dd->dc_shutdown) {
6389 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6390 return;
6391 }
6392 dd->dc_shutdown = 1;
6393 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6394 /* Shutdown the LCB */
6395 lcb_shutdown(dd, 1);
6397 * Going to OFFLINE would have caused the 8051 to put the
6398 * SerDes into reset already. Just need to shut down the 8051,
6401 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6405 * Calling this after the DC has been brought out of reset should not
6406 * do any damage.
6407 */
6408 static void dc_start(struct hfi1_devdata *dd)
6410 unsigned long flags;
6413 spin_lock_irqsave(&dd->dc8051_lock, flags);
6414 if (!dd->dc_shutdown)
6415 goto done;
6416 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6417 /* Take the 8051 out of reset */
6418 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6419 /* Wait until 8051 is ready */
6420 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6422 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6425 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6426 write_csr(dd, DCC_CFG_RESET, 0x10);
6427 /* lcb_shutdown() with abort=1 does not restore these */
6428 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6429 spin_lock_irqsave(&dd->dc8051_lock, flags);
6430 dd->dc_shutdown = 0;
6431 done:
6432 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6436 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6438 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6440 u64 rx_radr, tx_radr;
6443 if (dd->icode != ICODE_FPGA_EMULATION)
6447 * These LCB defaults on emulator _s are good, nothing to do here:
6448 * LCB_CFG_TX_FIFOS_RADR
6449 * LCB_CFG_RX_FIFOS_RADR
6451 * LCB_CFG_IGNORE_LOST_RCLK
6453 if (is_emulator_s(dd))
6455 /* else this is _p */
6457 version = emulator_rev(dd);
6459 version = 0x2d; /* all B0 use 0x2d or higher settings */
6461 if (version <= 0x12) {
6462 /* release 0x12 and below */
6465 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6466 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6467 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6470 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6471 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6472 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6474 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6475 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6477 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6478 } else if (version <= 0x18) {
6479 /* release 0x13 up to 0x18 */
6480 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6482 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6483 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6484 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6485 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6486 } else if (version == 0x19) {
6488 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6490 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6491 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6492 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6493 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6494 } else if (version == 0x1a) {
6496 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6498 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6499 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6500 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6501 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6502 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6504 /* release 0x1b and higher */
6505 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6507 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6508 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6509 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6510 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6513 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6514 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6515 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6516 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6517 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6521 * Handle an SMA idle message
6523 * This is a work-queue function outside of the interrupt.
6525 void handle_sma_message(struct work_struct *work)
6527 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6529 struct hfi1_devdata *dd = ppd->dd;
6534 * msg is bytes 1-4 of the 40-bit idle message - the command code
6535 * is stripped off
6536 */
6537 ret = read_idle_sma(dd, &msg);
6540 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6542 * React to the SMA message. Byte[1] (0 for us) is the command.
6544 switch (msg & 0xff) {
6547 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6550 * Only expected in INIT or ARMED, discard otherwise.
6552 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6553 ppd->neighbor_normal = 1;
6555 case SMA_IDLE_ACTIVE:
6557 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6560 * Can activate the node. Discard otherwise.
6562 if (ppd->host_link_state == HLS_UP_ARMED &&
6563 ppd->is_active_optimize_enabled) {
6564 ppd->neighbor_normal = 1;
6565 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6569 "%s: received Active SMA idle message, couldn't set link to Active\n",
6575 "%s: received unexpected SMA idle message 0x%llx\n",
6581 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6584 unsigned long flags;
6586 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6587 rcvctrl = read_csr(dd, RCV_CTRL);
6590 write_csr(dd, RCV_CTRL, rcvctrl);
6591 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6594 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6596 adjust_rcvctrl(dd, add, 0);
6599 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6601 adjust_rcvctrl(dd, 0, clear);
6605 * Called from all interrupt handlers to start handling an SPC freeze.
6607 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6609 struct hfi1_devdata *dd = ppd->dd;
6610 struct send_context *sc;
6613 if (flags & FREEZE_SELF)
6614 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6616 /* enter frozen mode */
6617 dd->flags |= HFI1_FROZEN;
6619 /* notify all SDMA engines that they are going into a freeze */
6620 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6622 /* do halt pre-handling on all enabled send contexts */
6623 for (i = 0; i < dd->num_send_contexts; i++) {
6624 sc = dd->send_contexts[i].sc;
6625 if (sc && (sc->flags & SCF_ENABLED))
6626 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6629 /* Send contexts are frozen. Notify user space */
6630 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6632 if (flags & FREEZE_ABORT) {
6634 "Aborted freeze recovery. Please REBOOT system\n");
6637 /* queue non-interrupt handler */
6638 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6642 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6643 * depending on the "freeze" parameter.
6645 * No need to return an error if it times out, our only option
6646 * is to proceed anyway.
6648 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6650 unsigned long timeout;
6653 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6655 reg = read_csr(dd, CCE_STATUS);
6657 /* waiting until all indicators are set */
6658 if ((reg & ALL_FROZE) == ALL_FROZE)
6659 return; /* all done */
6661 /* waiting until all indicators are clear */
6662 if ((reg & ALL_FROZE) == 0)
6663 return; /* all done */
6666 if (time_after(jiffies, timeout)) {
6668 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6669 freeze ? "" : "un", reg & ALL_FROZE,
6670 freeze ? ALL_FROZE : 0ull);
6673 usleep_range(80, 120);
6678 * Do all freeze handling for the RXE block.
6680 static void rxe_freeze(struct hfi1_devdata *dd)
6685 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6687 /* disable all receive contexts */
6688 for (i = 0; i < dd->num_rcv_contexts; i++)
6689 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6693 * Unfreeze handling for the RXE block - kernel contexts only.
6694 * This will also enable the port. User contexts will do unfreeze
6695 * handling on a per-context basis as they call into the driver.
6698 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6703 /* enable all kernel contexts */
6704 for (i = 0; i < dd->n_krcv_queues; i++) {
6705 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6706 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6707 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6708 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6709 hfi1_rcvctrl(dd, rcvmask, i);
6713 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6717 * Non-interrupt SPC freeze handling.
6719 * This is a work-queue function outside of the triggering interrupt.
6721 void handle_freeze(struct work_struct *work)
6723 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6725 struct hfi1_devdata *dd = ppd->dd;
6727 /* wait for freeze indicators on all affected blocks */
6728 wait_for_freeze_status(dd, 1);
6730 /* SPC is now frozen */
6732 /* do send PIO freeze steps */
6735 /* do send DMA freeze steps */
6738 /* do send egress freeze steps - nothing to do */
6740 /* do receive freeze steps */
6744 * Unfreeze the hardware - clear the freeze, wait for each
6745 * block's frozen bit to clear, then clear the frozen flag.
6747 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6748 wait_for_freeze_status(dd, 0);
6751 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6752 wait_for_freeze_status(dd, 1);
6753 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6754 wait_for_freeze_status(dd, 0);
6757 /* do send PIO unfreeze steps for kernel contexts */
6758 pio_kernel_unfreeze(dd);
6760 /* do send DMA unfreeze steps */
6763 /* do send egress unfreeze steps - nothing to do */
6765 /* do receive unfreeze steps for kernel contexts */
6766 rxe_kernel_unfreeze(dd);
6769 * The unfreeze procedure touches global device registers when
6770 * it disables and re-enables RXE. Mark the device unfrozen
6771 * after all that is done so other parts of the driver waiting
6772 * for the device to unfreeze don't do things out of order.
6774 * The above implies that the meaning of HFI1_FROZEN flag is
6775 * "Device has gone into freeze mode and freeze mode handling
6776 * is still in progress."
6778 * The flag will be removed when freeze mode processing has
6779 * completed.
6780 */
6781 dd->flags &= ~HFI1_FROZEN;
6782 wake_up(&dd->event_queue);
6784 /* no longer frozen */
6788 * Handle a link up interrupt from the 8051.
6790 * This is a work-queue function outside of the interrupt.
6792 void handle_link_up(struct work_struct *work)
6794 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6796 set_link_state(ppd, HLS_UP_INIT);
6798 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6799 read_ltp_rtt(ppd->dd);
6801 * OPA specifies that certain counters are cleared on a transition
6802 * to link up, so do that.
6804 clear_linkup_counters(ppd->dd);
6806 * And (re)set link up default values.
6808 set_linkup_defaults(ppd);
6810 /* enforce link speed enabled */
6811 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6812 /* oops - current speed is not enabled, bounce */
6814 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6815 ppd->link_speed_active, ppd->link_speed_enabled);
6816 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6817 OPA_LINKDOWN_REASON_SPEED_POLICY);
6818 set_link_state(ppd, HLS_DN_OFFLINE);
6824 * Several pieces of LNI information were cached for SMA in ppd.
6825 * Reset these on link down
6827 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6829 ppd->neighbor_guid = 0;
6830 ppd->neighbor_port_number = 0;
6831 ppd->neighbor_type = 0;
6832 ppd->neighbor_fm_security = 0;
6835 static const char * const link_down_reason_strs[] = {
6836 [OPA_LINKDOWN_REASON_NONE] = "None",
6837 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6838 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6839 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6840 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6841 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6842 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6843 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6844 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6845 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6846 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6847 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6848 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6849 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6850 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6851 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6852 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6853 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6854 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6855 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6856 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6857 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6858 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6859 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6860 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6861 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6862 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6863 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6864 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6865 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6866 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6867 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6868 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6869 "Excessive buffer overrun",
6870 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6871 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6872 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6873 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6874 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6875 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6876 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6877 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6878 "Local media not installed",
6879 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6880 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6881 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6882 "End to end not installed",
6883 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6884 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6885 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6886 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6887 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6888 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6891 /* return the neighbor link down reason string */
6892 static const char *link_down_reason_str(u8 reason)
6894 const char *str = NULL;
6896 if (reason < ARRAY_SIZE(link_down_reason_strs))
6897 str = link_down_reason_strs[reason];
6905 * Handle a link down interrupt from the 8051.
6907 * This is a work-queue function outside of the interrupt.
6909 void handle_link_down(struct work_struct *work)
6911 u8 lcl_reason, neigh_reason = 0;
6912 u8 link_down_reason;
6913 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6916 static const char ldr_str[] = "Link down reason: ";
6918 if ((ppd->host_link_state &
6919 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6920 ppd->port_type == PORT_TYPE_FIXED)
6921 ppd->offline_disabled_reason =
6922 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6924 /* Go offline first, then deal with reading/writing through 8051 */
6925 was_up = !!(ppd->host_link_state & HLS_UP);
6926 set_link_state(ppd, HLS_DN_OFFLINE);
6930 /* link down reason is only valid if the link was up */
6931 read_link_down_reason(ppd->dd, &link_down_reason);
6932 switch (link_down_reason) {
6933 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6934 /* the link went down, no idle message reason */
6935 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6938 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6940 * The neighbor reason is only valid if an idle message
6941 * was received for it.
6943 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6944 dd_dev_info(ppd->dd,
6945 "%sNeighbor link down message %d, %s\n",
6946 ldr_str, neigh_reason,
6947 link_down_reason_str(neigh_reason));
6949 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6950 dd_dev_info(ppd->dd,
6951 "%sHost requested link to go offline\n",
6955 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6956 ldr_str, link_down_reason);
6961 * If no reason, assume peer-initiated but missed
6962 * LinkGoingDown idle flits.
6964 if (neigh_reason == 0)
6965 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6966 } else {
6967 /* went down while polling or going up */
6968 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6971 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6973 /* inform the SMA when the link transitions from up to down */
6974 if (was_up && ppd->local_link_down_reason.sma == 0 &&
6975 ppd->neigh_link_down_reason.sma == 0) {
6976 ppd->local_link_down_reason.sma =
6977 ppd->local_link_down_reason.latest;
6978 ppd->neigh_link_down_reason.sma =
6979 ppd->neigh_link_down_reason.latest;
6982 reset_neighbor_info(ppd);
6984 /* disable the port */
6985 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6988 * If there is no cable attached, turn the DC off. Otherwise,
6989 * start the link bring up.
6991 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
6992 dc_shutdown(ppd->dd);
6997 void handle_link_bounce(struct work_struct *work)
6999 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7003 * Only do something if the link is currently up.
7005 if (ppd->host_link_state & HLS_UP) {
7006 set_link_state(ppd, HLS_DN_OFFLINE);
7009 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7010 __func__, link_state_name(ppd->host_link_state));
7015 * Mask conversion: Capability exchange to Port LTP. The capability
7016 * exchange has an implicit 16b CRC that is mandatory.
7018 static int cap_to_port_ltp(int cap)
7020 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7022 if (cap & CAP_CRC_14B)
7023 port_ltp |= PORT_LTP_CRC_MODE_14;
7024 if (cap & CAP_CRC_48B)
7025 port_ltp |= PORT_LTP_CRC_MODE_48;
7026 if (cap & CAP_CRC_12B_16B_PER_LANE)
7027 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7033 * Convert an OPA Port LTP mask to capability mask
7035 int port_ltp_to_cap(int port_ltp)
7039 if (port_ltp & PORT_LTP_CRC_MODE_14)
7040 cap_mask |= CAP_CRC_14B;
7041 if (port_ltp & PORT_LTP_CRC_MODE_48)
7042 cap_mask |= CAP_CRC_48B;
7043 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7044 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7050 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7052 static int lcb_to_port_ltp(int lcb_crc)
7056 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7057 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7058 else if (lcb_crc == LCB_CRC_48B)
7059 port_ltp = PORT_LTP_CRC_MODE_48;
7060 else if (lcb_crc == LCB_CRC_14B)
7061 port_ltp = PORT_LTP_CRC_MODE_14;
7063 port_ltp = PORT_LTP_CRC_MODE_16;
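/*
 * Worked example: a capability mask of CAP_CRC_14B | CAP_CRC_48B maps to
 * PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48
 * (the mandatory 16b mode is always included); port_ltp_to_cap() simply
 * ignores the implicit 16b bit on the way back.
 */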
7069 * Our neighbor has indicated that we are allowed to act as a fabric
7070 * manager, so place the full management partition key in the second
7071 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7072 * that we should already have the limited management partition key in
7073 * array element 1, and also that the port is not yet up when
7074 * add_full_mgmt_pkey() is invoked.
7076 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7078 struct hfi1_devdata *dd = ppd->dd;
7080 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7081 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7082 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7083 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7084 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7085 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7086 hfi1_event_pkey_change(ppd->dd, ppd->port);
7089 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7091 if (ppd->pkeys[2] != 0) {
7093 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7094 hfi1_event_pkey_change(ppd->dd, ppd->port);
7099 * Convert the given link width to the OPA link width bitmask.
7101 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7106 * Simulator and quick linkup do not set the width.
7107 * Just set it to 4x without complaint.
7109 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7110 return OPA_LINK_WIDTH_4X;
7111 return 0; /* no lanes up */
7112 case 1: return OPA_LINK_WIDTH_1X;
7113 case 2: return OPA_LINK_WIDTH_2X;
7114 case 3: return OPA_LINK_WIDTH_3X;
7116 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7119 case 4: return OPA_LINK_WIDTH_4X;
7124 * Do a population count on the bottom nibble.
7126 static const u8 bit_counts[16] = {
7127 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7130 static inline u8 nibble_to_count(u8 nibble)
7132 return bit_counts[nibble & 0xf];
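/*
 * Worked example: nibble_to_count(0xb) == 3 - an enable pattern of
 * 0b1011 (lanes 0, 1 and 3) is three active lanes.
 */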
7136 * Read the active lane information from the 8051 registers and return
7139 * Active lane information is found in these 8051 registers:
7143 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7149 u8 tx_polarity_inversion;
7150 u8 rx_polarity_inversion;
7153 /* read the active lanes */
7154 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7155 &rx_polarity_inversion, &max_rate);
7156 read_local_lni(dd, &enable_lane_rx);
7158 /* convert to counts */
7159 tx = nibble_to_count(enable_lane_tx);
7160 rx = nibble_to_count(enable_lane_rx);
7163 * Set link_speed_active here, overriding what was set in
7164 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7165 * set the max_rate field in handle_verify_cap until v0.19.
7167 if ((dd->icode == ICODE_RTL_SILICON) &&
7168 (dd->dc8051_ver < dc8051_ver(0, 19))) {
7169 /* max_rate: 0 = 12.5G, 1 = 25G */
7172 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7176 "%s: unexpected max rate %d, using 25Gb\n",
7177 __func__, (int)max_rate);
7180 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7186 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7187 enable_lane_tx, tx, enable_lane_rx, rx);
7188 *tx_width = link_width_to_bits(dd, tx);
7189 *rx_width = link_width_to_bits(dd, rx);
7193 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7194 * Valid after the end of VerifyCap and during LinkUp. Does not change
7195 * after link up. I.e. look elsewhere for downgrade information.
7198 * + bits [7:4] contain the number of active transmitters
7199 * + bits [3:0] contain the number of active receivers
7200 * These are numbers 1 through 4 and can be different values if the
7201 * link is asymmetric.
7203 * verify_cap_local_fm_link_width[0] retains its original value.
7205 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7209 u8 misc_bits, local_flags;
7210 u16 active_tx, active_rx;
7212 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7213 tx = widths >> 12;
7214 rx = (widths >> 8) & 0xf;
7216 *tx_width = link_width_to_bits(dd, tx);
7217 *rx_width = link_width_to_bits(dd, rx);
7219 /* print the active widths */
7220 get_link_widths(dd, &active_tx, &active_rx);
7224 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7225 * hardware information when the link first comes up.
7227 * The link width is not available until after VerifyCap.AllFramesReceived
7228 * (the trigger for handle_verify_cap), so this is outside that routine
7229 * and should be called when the 8051 signals linkup.
7231 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7233 u16 tx_width, rx_width;
7235 /* get end-of-LNI link widths */
7236 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7238 /* use tx_width as the link is supposed to be symmetric on link up */
7239 ppd->link_width_active = tx_width;
7240 /* link width downgrade active (LWD.A) starts out matching LW.A */
7241 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7242 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7243 /* per OPA spec, on link up LWD.E resets to LWD.S */
7244 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7245 /* cache the active egress rate (units of 10^6 bits/sec) */
7246 ppd->current_egress_rate = active_egress_rate(ppd);
7250 * Handle a verify capabilities interrupt from the 8051.
7252 * This is a work-queue function outside of the interrupt.
7254 void handle_verify_cap(struct work_struct *work)
7256 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7258 struct hfi1_devdata *dd = ppd->dd;
7260 u8 power_management;
7270 u16 active_tx, active_rx;
7271 u8 partner_supported_crc;
7275 set_link_state(ppd, HLS_VERIFY_CAP);
7277 lcb_shutdown(dd, 0);
7278 adjust_lcb_for_fpga_serdes(dd);
7281 * These are now valid:
7282 * remote VerifyCap fields in the general LNI config
7283 * CSR DC8051_STS_REMOTE_GUID
7284 * CSR DC8051_STS_REMOTE_NODE_TYPE
7285 * CSR DC8051_STS_REMOTE_FM_SECURITY
7286 * CSR DC8051_STS_REMOTE_PORT_NO
7289 read_vc_remote_phy(dd, &power_management, &continuous);
7290 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7291 &partner_supported_crc);
7292 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7293 read_remote_device_id(dd, &device_id, &device_rev);
7295 * And the 'MgmtAllowed' information, which is exchanged during
7296 * LNI, is also available at this point.
7298 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7299 /* print the active widths */
7300 get_link_widths(dd, &active_tx, &active_rx);
7302 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7303 (int)power_management, (int)continuous);
7305 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7306 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7307 (int)partner_supported_crc);
7308 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7309 (u32)remote_tx_rate, (u32)link_widths);
7310 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7311 (u32)device_id, (u32)device_rev);
7313 * The peer vAU value just read is the peer receiver value. HFI does
7314 * not support a transmit vAU of 0 (AU == 8). We advertised that
7315 * with Z=1 in the fabric capabilities sent to the peer. The peer
7316 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7317 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7318 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7319 * subject to the Z value exception.
7320 */
7321 if (vau == 0)
7322 vau = 1;
7323 set_up_vl15(dd, vau, vl15buf);
7325 /* set up the LCB CRC mode */
7326 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7328 /* order is important: use the lowest bit in common */
7329 if (crc_mask & CAP_CRC_14B)
7330 crc_val = LCB_CRC_14B;
7331 else if (crc_mask & CAP_CRC_48B)
7332 crc_val = LCB_CRC_48B;
7333 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7334 crc_val = LCB_CRC_12B_16B_PER_LANE;
7336 crc_val = LCB_CRC_16B;
7338 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7339 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7340 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
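/*
 * Illustrative sketch, not driver code: the CRC negotiation above in
 * one helper. AND the locally enabled modes with the peer's supported
 * modes, then take the lowest common bit in the order 14b, 48b,
 * 12b/16b-per-lane, defaulting to 16b.
 */
static u32 example_pick_lcb_crc(u32 enabled, u32 peer_supported)
{
	u32 mask = enabled & peer_supported;

	if (mask & CAP_CRC_14B)
		return LCB_CRC_14B;
	if (mask & CAP_CRC_48B)
		return LCB_CRC_48B;
	if (mask & CAP_CRC_12B_16B_PER_LANE)
		return LCB_CRC_12B_16B_PER_LANE;
	return LCB_CRC_16B;	/* 16b is the always-available fallback */
}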
7342 /* set (14b only) or clear sideband credit */
7343 reg = read_csr(dd, SEND_CM_CTRL);
7344 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7345 write_csr(dd, SEND_CM_CTRL,
7346 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7348 write_csr(dd, SEND_CM_CTRL,
7349 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7352 ppd->link_speed_active = 0; /* invalid value */
7353 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7354 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7355 switch (remote_tx_rate) {
7357 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7360 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7364 /* actual rate is highest bit of the ANDed rates */
7365 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7368 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7370 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7372 if (ppd->link_speed_active == 0) {
7373 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7374 __func__, (int)remote_tx_rate);
7375 ppd->link_speed_active = OPA_LINK_SPEED_25G;
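/*
 * Illustrative sketch, not driver code: with 8051 firmware 0.20 or
 * later, the local and remote tx rates are bit masks (bit 0 = 12.5G,
 * bit 1 = 25G, matching set_local_link_attributes() below), and the
 * active speed is the highest bit common to both sides.
 */
static u16 example_negotiate_speed(u8 remote_tx_rate, u8 local_tx_rate)
{
	u8 common = remote_tx_rate & local_tx_rate;

	if (common & 2)
		return OPA_LINK_SPEED_25G;
	if (common & 1)
		return OPA_LINK_SPEED_12_5G;
	return 0;	/* invalid; the code above falls back to 25G */
}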
7379 * Cache the values of the supported, enabled, and active
7380 * LTP CRC modes to return in 'portinfo' queries. But the bit
7381 * flags that are returned in the portinfo query differ from
7382 * what's in the link_crc_mask, crc_sizes, and crc_val
7383 * variables. Convert these here.
7385 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7386 /* supported crc modes */
7387 ppd->port_ltp_crc_mode |=
7388 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7389 /* enabled crc modes */
7390 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7391 /* active crc mode */
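/*
 * Illustrative sketch, not driver code: port_ltp_crc_mode packs three
 * 4-bit fields, exactly as the three statements above do -- supported
 * modes in bits 11:8, enabled modes in bits 7:4, and the single active
 * mode in bits 3:0.
 */
static u16 example_pack_port_ltp(u16 supported, u16 enabled, u16 active)
{
	return (supported << 8) | (enabled << 4) | active;
}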
7393 /* set up the remote credit return table */
7394 assign_remote_cm_au_table(dd, vcu);
7397 * The LCB is reset on entry to handle_verify_cap(), so this must
7398 * be applied on every link up.
7400 * Adjust LCB error kill enable to kill the link if
7401 * these RBUF errors are seen:
7402 * REPLAY_BUF_MBE_SMASK
7403 * FLIT_INPUT_BUF_MBE_SMASK
7405 if (is_ax(dd)) { /* fixed in B0 */
7406 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7407 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7408 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7409 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7412 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7413 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7415 /* give 8051 access to the LCB CSRs */
7416 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7417 set_8051_lcb_access(dd);
7419 ppd->neighbor_guid =
7420 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7421 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7422 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7423 ppd->neighbor_type =
7424 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7425 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7426 ppd->neighbor_fm_security =
7427 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7428 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7430 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7431 ppd->neighbor_guid, ppd->neighbor_type,
7432 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7433 if (ppd->mgmt_allowed)
7434 add_full_mgmt_pkey(ppd);
7436 /* tell the 8051 to go to LinkUp */
7437 set_link_state(ppd, HLS_GOING_UP);
7441 * Apply the link width downgrade enabled policy against the current active link widths.
7444 * Called when the enabled policy changes or the active link widths change.
7446 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7453 /* use the hls lock to avoid a race with actual link up */
7456 mutex_lock(&ppd->hls_lock);
7457 /* only apply if the link is up */
7458 if (ppd->host_link_state & HLS_DOWN) {
7459 /* still going up... wait and retry */
7460 if (ppd->host_link_state & HLS_GOING_UP) {
7461 if (++tries < 1000) {
7462 mutex_unlock(&ppd->hls_lock);
7463 usleep_range(100, 120); /* arbitrary */
7467 "%s: giving up waiting for link state change\n",
7473 lwde = ppd->link_width_downgrade_enabled;
7475 if (refresh_widths) {
7476 get_link_widths(ppd->dd, &tx, &rx);
7477 ppd->link_width_downgrade_tx_active = tx;
7478 ppd->link_width_downgrade_rx_active = rx;
7481 if (ppd->link_width_downgrade_tx_active == 0 ||
7482 ppd->link_width_downgrade_rx_active == 0) {
7483 /* the 8051 reported a dead link as a downgrade */
7484 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7485 } else if (lwde == 0) {
7486 /* downgrade is disabled */
7488 /* bounce if not at starting active width */
7489 if ((ppd->link_width_active !=
7490 ppd->link_width_downgrade_tx_active) ||
7491 (ppd->link_width_active !=
7492 ppd->link_width_downgrade_rx_active)) {
7494 "Link downgrade is disabled and link has downgraded, downing link\n");
7496 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7497 ppd->link_width_active,
7498 ppd->link_width_downgrade_tx_active,
7499 ppd->link_width_downgrade_rx_active);
7502 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7503 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7504 /* Tx or Rx is outside the enabled policy */
7506 "Link is outside of downgrade allowed, downing link\n");
7508 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7509 lwde, ppd->link_width_downgrade_tx_active,
7510 ppd->link_width_downgrade_rx_active);
7515 mutex_unlock(&ppd->hls_lock);
7518 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7519 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7520 set_link_state(ppd, HLS_DN_OFFLINE);
7526 * Handle a link downgrade interrupt from the 8051.
7528 * This is a work-queue function outside of the interrupt.
7530 void handle_link_downgrade(struct work_struct *work)
7532 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7533 link_downgrade_work);
7535 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7536 apply_link_downgrade_policy(ppd, 1);
7539 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7541 return flag_string(buf, buf_len, flags, dcc_err_flags,
7542 ARRAY_SIZE(dcc_err_flags));
7545 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7547 return flag_string(buf, buf_len, flags, lcb_err_flags,
7548 ARRAY_SIZE(lcb_err_flags));
7551 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7553 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7554 ARRAY_SIZE(dc8051_err_flags));
7557 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7559 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7560 ARRAY_SIZE(dc8051_info_err_flags));
7563 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7565 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7566 ARRAY_SIZE(dc8051_info_host_msg_flags));
7569 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7571 struct hfi1_pportdata *ppd = dd->pport;
7572 u64 info, err, host_msg;
7573 int queue_link_down = 0;
7576 /* look at the flags */
7577 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7578 /* 8051 information set by firmware */
7579 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7580 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7581 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7582 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7584 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7585 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7588 * Handle error flags.
7590 if (err & FAILED_LNI) {
7592 * LNI error indications are cleared by the 8051
7593 * only when starting polling. Only pay attention
7594 * to them when in the states that occur during polling.
7597 if (ppd->host_link_state
7598 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7599 queue_link_down = 1;
7600 dd_dev_info(dd, "Link error: %s\n",
7601 dc8051_info_err_string(buf,
7606 err &= ~(u64)FAILED_LNI;
7608 /* unknown frames can happen during LNI, just count */
7609 if (err & UNKNOWN_FRAME) {
7610 ppd->unknown_frame_count++;
7611 err &= ~(u64)UNKNOWN_FRAME;
7614 /* report remaining errors, but do not do anything */
7615 dd_dev_err(dd, "8051 info error: %s\n",
7616 dc8051_info_err_string(buf, sizeof(buf),
7621 * Handle host message flags.
7623 if (host_msg & HOST_REQ_DONE) {
7625 * Presently, the driver does a busy wait for
7626 * host requests to complete. This is only an
7627 * informational message.
7628 * NOTE: The 8051 clears the host message
7629 * information *on the next 8051 command*.
7630 * Therefore, when linkup is achieved,
7631 * this flag will still be set.
7633 host_msg &= ~(u64)HOST_REQ_DONE;
7635 if (host_msg & BC_SMA_MSG) {
7636 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7637 host_msg &= ~(u64)BC_SMA_MSG;
7639 if (host_msg & LINKUP_ACHIEVED) {
7640 dd_dev_info(dd, "8051: Link up\n");
7641 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7642 host_msg &= ~(u64)LINKUP_ACHIEVED;
7644 if (host_msg & EXT_DEVICE_CFG_REQ) {
7645 handle_8051_request(ppd);
7646 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7648 if (host_msg & VERIFY_CAP_FRAME) {
7649 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7650 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7652 if (host_msg & LINK_GOING_DOWN) {
7653 const char *extra = "";
7654 /* no downgrade action needed if going down */
7655 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7656 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7657 extra = " (ignoring downgrade)";
7659 dd_dev_info(dd, "8051: Link down%s\n", extra);
7660 queue_link_down = 1;
7661 host_msg &= ~(u64)LINK_GOING_DOWN;
7663 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7664 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7665 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7668 /* report remaining messages, but do not do anything */
7669 dd_dev_info(dd, "8051 info host message: %s\n",
7670 dc8051_info_host_msg_string(buf,
7675 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7677 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7679 * Lost the 8051 heartbeat. If this happens, we
7680 * receive constant interrupts about it. Disable
7681 * the interrupt after the first.
7683 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7684 write_csr(dd, DC_DC8051_ERR_EN,
7685 read_csr(dd, DC_DC8051_ERR_EN) &
7686 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7688 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7691 /* report the error, but do not do anything */
7692 dd_dev_err(dd, "8051 error: %s\n",
7693 dc8051_err_string(buf, sizeof(buf), reg));
7696 if (queue_link_down) {
7698 * if the link is already going down or disabled, do not queue another
7701 if ((ppd->host_link_state &
7702 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7703 ppd->link_enabled == 0) {
7704 dd_dev_info(dd, "%s: not queuing link down\n",
7707 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7712 static const char * const fm_config_txt[] = {
7714 "BadHeadDist: Distance violation between two head flits",
7716 "BadTailDist: Distance violation between two tail flits",
7718 "BadCtrlDist: Distance violation between two credit control flits",
7720 "BadCrdAck: Credits return for unsupported VL",
7722 "UnsupportedVLMarker: Received VL Marker",
7724 "BadPreempt: Exceeded the preemption nesting level",
7726 "BadControlFlit: Received unsupported control flit",
7729 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7732 static const char * const port_rcv_txt[] = {
7734 "BadPktLen: Illegal PktLen",
7736 "PktLenTooLong: Packet longer than PktLen",
7738 "PktLenTooShort: Packet shorter than PktLen",
7740 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7742 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7744 "BadL2: Illegal L2 opcode",
7746 "BadSC: Unsupported SC",
7748 "BadRC: Illegal RC",
7750 "PreemptError: Preempting with same VL",
7752 "PreemptVL15: Preempting a VL15 packet",
7755 #define OPA_LDR_FMCONFIG_OFFSET 16
7756 #define OPA_LDR_PORTRCV_OFFSET 0
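/*
 * Illustrative sketch, not driver code: port_error_action is a mask
 * with one bit per error code; the offsets above place fmconfig codes
 * at bits 16 and up and port receive codes at bits 0 and up. A bounce
 * is requested when the bit for (offset + code) is set.
 */
static int example_should_bounce(u32 port_error_action, u32 code, u32 offset)
{
	return !!(port_error_action & (1u << (offset + code)));
}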
7757 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7759 u64 info, hdr0, hdr1;
7762 struct hfi1_pportdata *ppd = dd->pport;
7766 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7767 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7768 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7769 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7770 /* set status bit */
7771 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7773 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7776 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7777 struct hfi1_pportdata *ppd = dd->pport;
7778 /* this counter saturates at (2^32) - 1 */
7779 if (ppd->link_downed < (u32)UINT_MAX)
7781 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7784 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7785 u8 reason_valid = 1;
7787 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7788 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7789 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7790 /* set status bit */
7791 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7801 extra = fm_config_txt[info];
7804 extra = fm_config_txt[info];
7805 if (ppd->port_error_action &
7806 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7809 * lcl_reason cannot be derived from info
7813 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7818 snprintf(buf, sizeof(buf), "reserved%lld", info);
7823 if (reason_valid && !do_bounce) {
7824 do_bounce = ppd->port_error_action &
7825 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7826 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7829 /* just report this */
7830 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7831 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7834 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7835 u8 reason_valid = 1;
7837 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7838 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7839 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7840 if (!(dd->err_info_rcvport.status_and_code &
7841 OPA_EI_STATUS_SMASK)) {
7842 dd->err_info_rcvport.status_and_code =
7843 info & OPA_EI_CODE_SMASK;
7844 /* set status bit */
7845 dd->err_info_rcvport.status_and_code |=
7846 OPA_EI_STATUS_SMASK;
7848 * save first 2 flits in the packet that caused the error
7851 dd->err_info_rcvport.packet_flit1 = hdr0;
7852 dd->err_info_rcvport.packet_flit2 = hdr1;
7865 extra = port_rcv_txt[info];
7869 snprintf(buf, sizeof(buf), "reserved%lld", info);
7874 if (reason_valid && !do_bounce) {
7875 do_bounce = ppd->port_error_action &
7876 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7877 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7880 /* just report this */
7881 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7882 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7885 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7888 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7889 /* informative only */
7890 dd_dev_info(dd, "8051 access to LCB blocked\n");
7891 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7893 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7894 /* informative only */
7895 dd_dev_info(dd, "host access to LCB blocked\n");
7896 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7899 /* report any remaining errors */
7901 dd_dev_info(dd, "DCC Error: %s\n",
7902 dcc_err_string(buf, sizeof(buf), reg));
7904 if (lcl_reason == 0)
7905 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7908 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7909 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7910 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7914 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7918 dd_dev_info(dd, "LCB Error: %s\n",
7919 lcb_err_string(buf, sizeof(buf), reg));
7923 * CCE block DC interrupt. Source is < 8.
7925 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7927 const struct err_reg_info *eri = &dc_errs[source];
7930 interrupt_clear_down(dd, 0, eri);
7931 } else if (source == 3 /* dc_lbm_int */) {
7933 * This indicates that a parity error has occurred on the
7934 * address/control lines presented to the LBM. The error
7935 * is a single pulse, there is no associated error flag,
7936 * and it is non-maskable. This is because if a parity
7937 * error occurs on the request, the request is dropped.
7938 * This should never occur, but it is nice to know if it ever does.
7941 dd_dev_err(dd, "Parity error in DC LBM block\n");
7943 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7948 * TX block send credit interrupt. Source is < 160.
7950 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7952 sc_group_release_update(dd, source);
7956 * TX block SDMA interrupt. Source is < 48.
7958 * SDMA interrupts are grouped by type (N = TXE_NUM_SDMA_ENGINES):
 *  0 -  N-1 = SDma
7961 *  N - 2N-1 = SDmaProgress
7962 * 2N - 3N-1 = SDmaIdle
7964 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7966 /* what interrupt */
7967 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7969 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7971 #ifdef CONFIG_SDMA_VERBOSITY
7972 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7973 slashstrip(__FILE__), __LINE__, __func__);
7974 sdma_dumpstate(&dd->per_sdma[which]);
7977 if (likely(what < 3 && which < dd->num_sdma)) {
7978 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7980 /* should not happen */
7981 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7986 * RX block receive available interrupt. Source is < 160.
7988 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7990 struct hfi1_ctxtdata *rcd;
7993 if (likely(source < dd->num_rcv_contexts)) {
7994 rcd = dd->rcd[source];
7996 if (source < dd->first_user_ctxt)
7997 rcd->do_interrupt(rcd, 0);
7999 handle_user_interrupt(rcd);
8002 /* received an interrupt, but no rcd */
8003 err_detail = "dataless";
8005 /* received an interrupt, but are not using that context */
8006 err_detail = "out of range";
8008 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8009 err_detail, source);
8013 * RX block receive urgent interrupt. Source is < 160.
8015 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8017 struct hfi1_ctxtdata *rcd;
8020 if (likely(source < dd->num_rcv_contexts)) {
8021 rcd = dd->rcd[source];
8023 /* only pay attention to user urgent interrupts */
8024 if (source >= dd->first_user_ctxt)
8025 handle_user_interrupt(rcd);
8028 /* received an interrupt, but no rcd */
8029 err_detail = "dataless";
8031 /* received an interrupt, but are not using that context */
8032 err_detail = "out of range";
8034 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8035 err_detail, source);
8039 * Reserved range interrupt. Should not be called in normal operation.
8041 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8045 dd_dev_err(dd, "unexpected %s interrupt\n",
8046 is_reserved_name(name, sizeof(name), source));
8049 static const struct is_table is_table[] = {
8052 * name func interrupt func
8054 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8055 is_misc_err_name, is_misc_err_int },
8056 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8057 is_sdma_eng_err_name, is_sdma_eng_err_int },
8058 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8059 is_sendctxt_err_name, is_sendctxt_err_int },
8060 { IS_SDMA_START, IS_SDMA_END,
8061 is_sdma_eng_name, is_sdma_eng_int },
8062 { IS_VARIOUS_START, IS_VARIOUS_END,
8063 is_various_name, is_various_int },
8064 { IS_DC_START, IS_DC_END,
8065 is_dc_name, is_dc_int },
8066 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8067 is_rcv_avail_name, is_rcv_avail_int },
8068 { IS_RCVURGENT_START, IS_RCVURGENT_END,
8069 is_rcv_urgent_name, is_rcv_urgent_int },
8070 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8071 is_send_credit_name, is_send_credit_int},
8072 { IS_RESERVED_START, IS_RESERVED_END,
8073 is_reserved_name, is_reserved_int},
8077 * Interrupt source interrupt - called when the given source has an interrupt.
8078 * Source is a bit index into an array of 64-bit integers.
8080 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8082 const struct is_table *entry;
8084 /* avoids a double compare by walking the table in-order */
8085 for (entry = &is_table[0]; entry->is_name; entry++) {
8086 if (source < entry->end) {
8087 trace_hfi1_interrupt(dd, entry, source);
8088 entry->is_int(dd, source - entry->start);
8092 /* fell off the end */
8093 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8097 * General interrupt handler. This is able to correctly handle
8098 * all interrupts in case INTx is used.
8100 static irqreturn_t general_interrupt(int irq, void *data)
8102 struct hfi1_devdata *dd = data;
8103 u64 regs[CCE_NUM_INT_CSRS];
8107 this_cpu_inc(*dd->int_counter);
8109 /* phase 1: scan and clear all handled interrupts */
8110 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8111 if (dd->gi_mask[i] == 0) {
8112 regs[i] = 0; /* used later */
8115 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8117 /* only clear if anything is set */
8119 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8122 /* phase 2: call the appropriate handler */
8123 for_each_set_bit(bit, (unsigned long *)&regs[0],
8124 CCE_NUM_INT_CSRS * 64) {
8125 is_interrupt(dd, bit);
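/*
 * Illustrative sketch, not driver code: the loop above treats the
 * CCE_NUM_INT_CSRS status registers as one contiguous bit vector, so a
 * set bit's overall index is the interrupt source number. Recovering
 * the CSR index and bit position is a divide and modulo by 64.
 */
static inline void example_source_to_csr_bit(unsigned int source,
					     unsigned int *csr,
					     unsigned int *bit)
{
	*csr = source / 64;	/* which CCE_INT_STATUS/CLEAR CSR */
	*bit = source % 64;	/* bit within that 64-bit CSR */
}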
8131 static irqreturn_t sdma_interrupt(int irq, void *data)
8133 struct sdma_engine *sde = data;
8134 struct hfi1_devdata *dd = sde->dd;
8137 #ifdef CONFIG_SDMA_VERBOSITY
8138 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8139 slashstrip(__FILE__), __LINE__, __func__);
8140 sdma_dumpstate(sde);
8143 this_cpu_inc(*dd->int_counter);
8145 /* This read_csr is really bad in the hot path */
8146 status = read_csr(dd,
8147 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8149 if (likely(status)) {
8150 /* clear the interrupt(s) */
8152 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8155 /* handle the interrupt(s) */
8156 sdma_engine_interrupt(sde, status);
8158 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8165 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8166 * to ensure that the write completed. This does NOT guarantee that
8167 * queued DMA writes to memory from the chip are pushed.
8169 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8171 struct hfi1_devdata *dd = rcd->dd;
8172 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8174 mmiowb(); /* make sure everything before is written */
8175 write_csr(dd, addr, rcd->imask);
8176 /* force the above write on the chip and get a value back */
8177 (void)read_csr(dd, addr);
8180 /* force the receive interrupt */
8181 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8183 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8187 * Return non-zero if a packet is present.
8189 * This routine is called when rechecking for packets after the RcvAvail
8190 * interrupt has been cleared down. First, do a quick check of memory for
8191 * a packet present. If not found, use an expensive CSR read of the context
8192 * tail to determine the actual tail. The CSR read is necessary because there
8193 * is no method to push pending DMAs to memory other than an interrupt and we
8194 * are trying to determine if we need to force an interrupt.
8196 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8201 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8202 present = (rcd->seq_cnt ==
8203 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8204 else /* is RDMA rtail */
8205 present = (rcd->head != get_rcvhdrtail(rcd));
8210 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8211 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8212 return rcd->head != tail;
8216 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8217 * This routine will try to handle packets immediately (latency), but if
8218 * it finds too many, it will invoke the thread handler (bandwidth). The
8219 * chip receive interrupt is *not* cleared down until this or the thread (if
8220 * invoked) is finished. The intent is to avoid extra interrupts while we
8221 * are processing packets anyway.
8223 static irqreturn_t receive_context_interrupt(int irq, void *data)
8225 struct hfi1_ctxtdata *rcd = data;
8226 struct hfi1_devdata *dd = rcd->dd;
8230 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8231 this_cpu_inc(*dd->int_counter);
8232 aspm_ctx_disable(rcd);
8234 /* receive interrupt remains blocked while processing packets */
8235 disposition = rcd->do_interrupt(rcd, 0);
8238 * Too many packets were seen while processing packets in this
8239 * IRQ handler. Invoke the handler thread. The receive interrupt remains blocked.
8242 if (disposition == RCV_PKT_LIMIT)
8243 return IRQ_WAKE_THREAD;
8246 * The packet processor detected no more packets. Clear the receive
8247 * interrupt and recheck for a packet that may have arrived
8248 * after the previous check and interrupt clear. If a packet arrived,
8249 * force another interrupt.
8251 clear_recv_intr(rcd);
8252 present = check_packet_present(rcd);
8254 force_recv_intr(rcd);
8260 * Receive packet thread handler. This expects to be invoked with the
8261 * receive interrupt still blocked.
8263 static irqreturn_t receive_context_thread(int irq, void *data)
8265 struct hfi1_ctxtdata *rcd = data;
8268 /* receive interrupt is still blocked from the IRQ handler */
8269 (void)rcd->do_interrupt(rcd, 1);
8272 * The packet processor will only return if it detected no more
8273 * packets. Hold IRQs here so we can safely clear the interrupt and
8274 * recheck for a packet that may have arrived after the previous
8275 * check and the interrupt clear. If a packet arrived, force another interrupt.
8278 local_irq_disable();
8279 clear_recv_intr(rcd);
8280 present = check_packet_present(rcd);
8282 force_recv_intr(rcd);
8288 /* ========================================================================= */
8290 u32 read_physical_state(struct hfi1_devdata *dd)
8294 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8295 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8296 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8299 u32 read_logical_state(struct hfi1_devdata *dd)
8303 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8304 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8305 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8308 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8312 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8313 /* clear current state, set new state */
8314 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8315 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8316 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8320 * Use the 8051 to read a LCB CSR.
8322 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8327 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8328 if (acquire_lcb_access(dd, 0) == 0) {
8329 *data = read_csr(dd, addr);
8330 release_lcb_access(dd, 0);
8336 /* register is an index of LCB registers: (offset - base) / 8 */
8337 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8338 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8339 if (ret != HCMD_SUCCESS)
8345 * Read an LCB CSR. Access may not be in host control, so check.
8346 * Return 0 on success, -EBUSY on failure.
8348 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8350 struct hfi1_pportdata *ppd = dd->pport;
8352 /* if up, go through the 8051 for the value */
8353 if (ppd->host_link_state & HLS_UP)
8354 return read_lcb_via_8051(dd, addr, data);
8355 /* if going up or down, no access */
8356 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8358 /* otherwise, host has access */
8359 *data = read_csr(dd, addr);
8364 * Use the 8051 to write a LCB CSR.
8366 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8371 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8372 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8373 if (acquire_lcb_access(dd, 0) == 0) {
8374 write_csr(dd, addr, data);
8375 release_lcb_access(dd, 0);
8381 /* register is an index of LCB registers: (offset - base) / 8 */
8382 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8383 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8384 if (ret != HCMD_SUCCESS)
8390 * Write an LCB CSR. Access may not be in host control, so check.
8391 * Return 0 on success, -EBUSY on failure.
8393 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8395 struct hfi1_pportdata *ppd = dd->pport;
8397 /* if up, go through the 8051 for the value */
8398 if (ppd->host_link_state & HLS_UP)
8399 return write_lcb_via_8051(dd, addr, data);
8400 /* if going up or down, no access */
8401 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8403 /* otherwise, host has access */
8404 write_csr(dd, addr, data);
8410 * < 0 = Linux error, not able to get access
8411 * > 0 = 8051 command RETURN_CODE
8413 static int do_8051_command(
8414 struct hfi1_devdata *dd,
8421 unsigned long flags;
8422 unsigned long timeout;
8424 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8427 * Alternative to holding the lock for a long time:
8428 * - keep busy wait - have other users bounce off
8430 spin_lock_irqsave(&dd->dc8051_lock, flags);
8432 /* We can't send any commands to the 8051 if it's in reset */
8433 if (dd->dc_shutdown) {
8434 return_code = -ENODEV;
8439 * If an 8051 host command timed out previously, then the 8051 is stuck.
8442 * On first timeout, attempt to reset and restart the entire DC
8443 * block (including 8051). (Is this too big of a hammer?)
8445 * If the 8051 times out a second time, the reset did not bring it
8446 * back to healthy life. In that case, fail any subsequent commands.
8448 if (dd->dc8051_timed_out) {
8449 if (dd->dc8051_timed_out > 1) {
8451 "Previous 8051 host command timed out, skipping command %u\n",
8453 return_code = -ENXIO;
8456 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8459 spin_lock_irqsave(&dd->dc8051_lock, flags);
8463 * If there is no timeout, then the 8051 command interface is
8464 * waiting for a command.
8468 * When writing an LCB CSR, out_data contains the full value
8469 * to be written, while in_data contains the relative LCB
8470 * address in 7:0. Do the work here, rather than in the caller,
8471 * of distributing the write data to where it needs to go:
8474 * 39:00 -> in_data[47:8]
8475 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8476 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8478 if (type == HCMD_WRITE_LCB_CSR) {
8479 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8480 reg = ((((*out_data) >> 40) & 0xff) <<
8481 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8482 | ((((*out_data) >> 48) & 0xffff) <<
8483 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8484 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
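/*
 * Illustrative sketch, not driver code: the 39:00 / 47:40 / 63:48
 * split performed just above, pulled into a helper. The regno sits in
 * in_data bits 7:0 and the bulk of the CSR value rides in bits 47:8.
 */
static void example_split_lcb_write(u64 value, u64 regno,
				    u64 *in_data, u64 *ext_dev_0)
{
	*in_data = regno | ((value & 0xffffffffffull) << 8);
	*ext_dev_0 = (((value >> 40) & 0xff) <<
		      DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT) |
		     (((value >> 48) & 0xffff) <<
		      DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
}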
8488 * Do two writes: the first to stabilize the type and req_data, the
8489 * second to activate.
8491 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8492 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8493 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8494 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8495 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8496 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8497 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8499 /* wait for completion, alternate: interrupt */
8500 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8502 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8503 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8506 if (time_after(jiffies, timeout)) {
8507 dd->dc8051_timed_out++;
8508 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8511 return_code = -ETIMEDOUT;
8518 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8519 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8520 if (type == HCMD_READ_LCB_CSR) {
8521 /* top 16 bits are in a different register */
8522 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8523 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8525 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8528 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8529 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8530 dd->dc8051_timed_out = 0;
8532 * Clear command for next user.
8534 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8537 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8542 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8544 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8547 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8548 u8 lane_id, u32 config_data)
8553 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8554 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8555 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8556 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8557 if (ret != HCMD_SUCCESS) {
8559 "load 8051 config: field id %d, lane %d, err %d\n",
8560 (int)field_id, (int)lane_id, ret);
8566 * Read the 8051 firmware "registers". Use the RAM directly. Always
8567 * set the result, even on error.
8568 * Return 0 on success, -errno on failure
8570 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8577 /* address start depends on the lane_id */
8579 addr = (4 * NUM_GENERAL_FIELDS)
8580 + (lane_id * 4 * NUM_LANE_FIELDS);
8583 addr += field_id * 4;
8585 /* read is in 8-byte chunks, hardware will truncate the address down */
8586 ret = read_8051_data(dd, addr, 8, &big_data);
8589 /* extract the 4 bytes we want */
8591 *result = (u32)(big_data >> 32);
8593 *result = (u32)big_data;
8596 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8597 __func__, lane_id, field_id);
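/*
 * Illustrative sketch, not driver code: how an 8051 "register" address
 * is formed. General fields start at offset 0; per-lane fields follow
 * them, NUM_LANE_FIELDS per lane, 4 bytes per field. This assumes, as
 * the code above implies, that GENERAL_CONFIG is a lane_id value
 * outside 0-3 that selects the general block.
 */
static u32 example_8051_field_addr(u8 field_id, u8 lane_id)
{
	u32 addr = 0;

	if (lane_id < 4)	/* a real lane: skip past the general fields */
		addr = (4 * NUM_GENERAL_FIELDS) +
		       (lane_id * 4 * NUM_LANE_FIELDS);
	return addr + field_id * 4;
	/* the 8-byte read then returns this field in its upper or lower
	 * 4 bytes, selected by bit 2 of the address */
}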
8603 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8608 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8609 | power_management << POWER_MANAGEMENT_SHIFT;
8610 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8611 GENERAL_CONFIG, frame);
8614 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8615 u16 vl15buf, u8 crc_sizes)
8619 frame = (u32)vau << VAU_SHIFT
8621 | (u32)vcu << VCU_SHIFT
8622 | (u32)vl15buf << VL15BUF_SHIFT
8623 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8624 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8625 GENERAL_CONFIG, frame);
8628 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8629 u8 *flag_bits, u16 *link_widths)
8633 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8635 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8636 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8637 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8640 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8647 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8648 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8649 | (u32)link_widths << LINK_WIDTH_SHIFT;
8650 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8654 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8659 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8660 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8661 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8664 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8669 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8670 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8671 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8672 & REMOTE_DEVICE_REV_MASK;
8675 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8679 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8680 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8681 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8684 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8689 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8690 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8691 & POWER_MANAGEMENT_MASK;
8692 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8693 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8696 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8697 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8701 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8702 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8703 *z = (frame >> Z_SHIFT) & Z_MASK;
8704 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8705 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8706 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8709 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8715 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8717 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8718 & REMOTE_TX_RATE_MASK;
8719 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8722 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8726 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8727 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8730 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8734 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8735 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8738 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8740 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8743 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8745 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8748 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8754 if (dd->pport->host_link_state & HLS_UP) {
8755 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8758 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8759 & LINK_QUALITY_MASK;
8763 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8767 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8768 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8771 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8775 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8776 *ldr = (frame & 0xff);
8779 static int read_tx_settings(struct hfi1_devdata *dd,
8781 u8 *tx_polarity_inversion,
8782 u8 *rx_polarity_inversion,
8788 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8789 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8790 & ENABLE_LANE_TX_MASK;
8791 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8792 & TX_POLARITY_INVERSION_MASK;
8793 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8794 & RX_POLARITY_INVERSION_MASK;
8795 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8799 static int write_tx_settings(struct hfi1_devdata *dd,
8801 u8 tx_polarity_inversion,
8802 u8 rx_polarity_inversion,
8807 /* no need to mask, all variable sizes match field widths */
8808 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8809 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8810 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8811 | max_rate << MAX_RATE_SHIFT;
8812 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8816 * Read an idle LCB message.
8818 * Returns 0 on success, -EINVAL on error
8820 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8824 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8825 if (ret != HCMD_SUCCESS) {
8826 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8830 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8831 /* return only the payload as we already know the type */
8832 *data_out >>= IDLE_PAYLOAD_SHIFT;
8837 * Read an idle SMA message. To be done in response to a notification from the 8051.
8840 * Returns 0 on success, -EINVAL on error
8842 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8844 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8849 * Send an idle LCB message.
8851 * Returns 0 on success, -EINVAL on error
8853 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8857 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8858 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8859 if (ret != HCMD_SUCCESS) {
8860 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8868 * Send an idle SMA message.
8870 * Returns 0 on success, -EINVAL on error
8872 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8876 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8877 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8878 return send_idle_message(dd, data);
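/*
 * Illustrative sketch, not driver code: an idle LCB message is just the
 * payload shifted into place beneath the message type, which is what
 * send_idle_sma() builds above and read_idle_message() undoes with its
 * payload shift.
 */
static u64 example_build_idle_msg(u64 type, u64 payload)
{
	return ((payload & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
	       (type << IDLE_MSG_TYPE_SHIFT);
}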
8882 * Initialize the LCB then do a quick link up. This may or may not be in loopback.
8885 * return 0 on success, -errno on error
8887 static int do_quick_linkup(struct hfi1_devdata *dd)
8890 unsigned long timeout;
8893 lcb_shutdown(dd, 0);
8896 /* LCB_CFG_LOOPBACK.VAL = 2 */
8897 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8898 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8899 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8900 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8903 /* start the LCBs */
8904 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8905 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8907 /* simulator only loopback steps */
8908 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8909 /* LCB_CFG_RUN.EN = 1 */
8910 write_csr(dd, DC_LCB_CFG_RUN,
8911 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8913 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8914 timeout = jiffies + msecs_to_jiffies(10);
8916 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8919 if (time_after(jiffies, timeout)) {
8921 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8927 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8928 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8933 * When doing quick linkup and not in loopback, both
8934 * sides must be done with LCB set-up before either
8935 * starts the quick linkup. Put a delay here so that
8936 * both sides can be started and have a chance to be
8937 * done with LCB set up before resuming.
8940 "Pausing for peer to be finished with LCB set up\n");
8942 dd_dev_err(dd, "Continuing with quick linkup\n");
8945 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8946 set_8051_lcb_access(dd);
8949 * State "quick" LinkUp request sets the physical link state to
8950 * LinkUp without a verify capability sequence.
8951 * This state is in simulator v37 and later.
8953 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8954 if (ret != HCMD_SUCCESS) {
8956 "%s: set physical link state to quick LinkUp failed with return %d\n",
8959 set_host_lcb_access(dd);
8960 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8967 return 0; /* success */
8971 * Set the SerDes to internal loopback mode.
8972 * Returns 0 on success, -errno on error.
8974 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8978 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8979 if (ret == HCMD_SUCCESS)
8982 "Set physical link state to SerDes Loopback failed with return %d\n",
8990 * Do all special steps to set up loopback.
8992 static int init_loopback(struct hfi1_devdata *dd)
8994 dd_dev_info(dd, "Entering loopback mode\n");
8996 /* all loopbacks should disable self GUID check */
8997 write_csr(dd, DC_DC8051_CFG_MODE,
8998 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9001 * The simulator has only one loopback option - LCB. Switch
9002 * to that option, which includes quick link up.
9004 * Accept all valid loopback values.
9006 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9007 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9008 loopback == LOOPBACK_CABLE)) {
9009 loopback = LOOPBACK_LCB;
9014 /* handle serdes loopback */
9015 if (loopback == LOOPBACK_SERDES) {
9016 /* internal serdes loopback needs quick linkup on RTL */
9017 if (dd->icode == ICODE_RTL_SILICON)
9019 return set_serdes_loopback_mode(dd);
9022 /* LCB loopback - handled at poll time */
9023 if (loopback == LOOPBACK_LCB) {
9024 quick_linkup = 1; /* LCB is always quick linkup */
9026 /* not supported in emulation due to emulation RTL changes */
9027 if (dd->icode == ICODE_FPGA_EMULATION) {
9029 "LCB loopback not supported in emulation\n");
9035 /* external cable loopback requires no extra steps */
9036 if (loopback == LOOPBACK_CABLE)
9039 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9044 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9045 * used in the Verify Capability link width attribute.
9047 static u16 opa_to_vc_link_widths(u16 opa_widths)
9052 static const struct link_bits {
9055 } opa_link_xlate[] = {
9056 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9057 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9058 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9059 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9062 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9063 if (opa_widths & opa_link_xlate[i].from)
9064 result |= opa_link_xlate[i].to;
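/*
 * Usage sketch, not driver code: with the table above, an FM-enabled
 * set of OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X translates to the VC
 * mask (1 << 0) | (1 << 3) = 0x9, i.e. bit N-1 set for each NX width.
 */
static u16 example_enabled_widths_to_vc(void)
{
	return opa_to_vc_link_widths(OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X);
}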
9070 * Set link attributes before moving to polling.
9072 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9074 struct hfi1_devdata *dd = ppd->dd;
9076 u8 tx_polarity_inversion;
9077 u8 rx_polarity_inversion;
9080 /* reset our fabric serdes to clear any lingering problems */
9081 fabric_serdes_reset(dd);
9083 /* set the local tx rate - need to read-modify-write */
9084 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9085 &rx_polarity_inversion, &ppd->local_tx_rate);
9087 goto set_local_link_attributes_fail;
9089 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9090 /* set the tx rate to the fastest enabled */
9091 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9092 ppd->local_tx_rate = 1;
9094 ppd->local_tx_rate = 0;
9096 /* set the tx rate to all enabled */
9097 ppd->local_tx_rate = 0;
9098 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9099 ppd->local_tx_rate |= 2;
9100 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9101 ppd->local_tx_rate |= 1;
9104 enable_lane_tx = 0xF; /* enable all four lanes */
9105 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9106 rx_polarity_inversion, ppd->local_tx_rate);
9107 if (ret != HCMD_SUCCESS)
9108 goto set_local_link_attributes_fail;
9111 * DC supports continuous updates.
9113 ret = write_vc_local_phy(dd,
9114 0 /* no power management */,
9115 1 /* continuous updates */);
9116 if (ret != HCMD_SUCCESS)
9117 goto set_local_link_attributes_fail;
9119 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9120 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9121 ppd->port_crc_mode_enabled);
9122 if (ret != HCMD_SUCCESS)
9123 goto set_local_link_attributes_fail;
9125 ret = write_vc_local_link_width(dd, 0, 0,
9126 opa_to_vc_link_widths(
9127 ppd->link_width_enabled));
9128 if (ret != HCMD_SUCCESS)
9129 goto set_local_link_attributes_fail;
9131 /* let peer know who we are */
9132 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9133 if (ret == HCMD_SUCCESS)
9136 set_local_link_attributes_fail:
9138 "Failed to set local link attributes, return 0x%x\n",
9144 * Call this to start the link.
9145 * Do not do anything if the link is disabled.
9146 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9148 int start_link(struct hfi1_pportdata *ppd)
9151 * Tune the SerDes to a ballpark setting for optimal signal and bit
9152 * error rate. Needs to be done before starting the link.
9156 if (!ppd->link_enabled) {
9157 dd_dev_info(ppd->dd,
9158 "%s: stopping link start because link is disabled\n",
9162 if (!ppd->driver_link_ready) {
9163 dd_dev_info(ppd->dd,
9164 "%s: stopping link start because driver is not ready\n",
9170 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9171 * pkey table can be configured properly if the HFI unit is connected
9172 * to a switch port with MgmtAllowed=NO
9174 clear_full_mgmt_pkey(ppd);
9176 return set_link_state(ppd, HLS_DN_POLL);
9179 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9181 struct hfi1_devdata *dd = ppd->dd;
9183 unsigned long timeout;
9186 * Some QSFP cables have a quirk that asserts the IntN line as a side
9187 * effect of power up on plug-in. We ignore this false positive
9188 * interrupt until the module has finished powering up by waiting for
9189 * a minimum timeout of the module inrush initialization time of
9190 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9191 * module have stabilized.
9196 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9198 timeout = jiffies + msecs_to_jiffies(2000);
9200 mask = read_csr(dd, dd->hfi1_id ?
9201 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9202 if (!(mask & QSFP_HFI0_INT_N))
9204 if (time_after(jiffies, timeout)) {
9205 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9213 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9215 struct hfi1_devdata *dd = ppd->dd;
9218 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9221 * Clear the status register to avoid an immediate interrupt
9222 * when we re-enable the IntN pin
9224 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9226 mask |= (u64)QSFP_HFI0_INT_N;
9228 mask &= ~(u64)QSFP_HFI0_INT_N;
9230 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9233 void reset_qsfp(struct hfi1_pportdata *ppd)
9235 struct hfi1_devdata *dd = ppd->dd;
9236 u64 mask, qsfp_mask;
9238 /* Disable INT_N from triggering QSFP interrupts */
9239 set_qsfp_int_n(ppd, 0);
9241 /* Reset the QSFP */
9242 mask = (u64)QSFP_HFI0_RESET_N;
9244 qsfp_mask = read_csr(dd,
9245 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9248 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9254 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9256 wait_for_qsfp_init(ppd);
9259 * Allow INT_N to trigger the QSFP interrupt to watch
9260 * for alarms and warnings
9262 set_qsfp_int_n(ppd, 1);
9265 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9266 u8 *qsfp_interrupt_status)
9268 struct hfi1_devdata *dd = ppd->dd;
9270 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9271 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9272 dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
9275 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9276 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9277 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9281 * The remaining alarms/warnings don't matter if the link is down.
9283 if (ppd->host_link_state & HLS_DOWN)
9286 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9287 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9288 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9291 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9292 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9293 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9296 /* Byte 2 is vendor specific */
9298 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9299 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9300 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9303 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9304 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9305 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9308 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9309 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9310 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9313 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9314 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9315 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9318 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9319 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9320 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9323 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9324 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9325 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9328 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9329 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9330 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9333 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9334 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9335 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9338 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9339 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9340 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9343 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9344 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9345 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9348 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9349 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9350 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9353 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9354 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9355 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9358 /* Bytes 9-10 and 11-12 are reserved */
9359 /* Bytes 13-15 are vendor specific */
9364 /* This routine will only be scheduled if the QSFP module-present signal is asserted */
9365 void qsfp_event(struct work_struct *work)
9367 struct qsfp_data *qd;
9368 struct hfi1_pportdata *ppd;
9369 struct hfi1_devdata *dd;
9371 qd = container_of(work, struct qsfp_data, qsfp_work);
9376 if (!qsfp_mod_present(ppd))
9380 * Turn DC back on after cable has been re-inserted. Up until
9381 * now, the DC has been in reset to save power.
9385 if (qd->cache_refresh_required) {
9386 set_qsfp_int_n(ppd, 0);
9388 wait_for_qsfp_init(ppd);
9391 * Allow INT_N to trigger the QSFP interrupt to watch
9392 * for alarms and warnings
9394 set_qsfp_int_n(ppd, 1);
9399 if (qd->check_interrupt_flags) {
9400 u8 qsfp_interrupt_status[16] = {0,};
9402 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9403 &qsfp_interrupt_status[0], 16) != 16) {
9405 "%s: Failed to read status of QSFP module\n",
9408 unsigned long flags;
9410 handle_qsfp_error_conditions(
9411 ppd, qsfp_interrupt_status);
9412 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9413 ppd->qsfp_info.check_interrupt_flags = 0;
9414 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9420 static void init_qsfp_int(struct hfi1_devdata *dd)
9422 struct hfi1_pportdata *ppd = dd->pport;
9423 u64 qsfp_mask, cce_int_mask;
9424 const int qsfp1_int_smask = QSFP1_INT % 64;
9425 const int qsfp2_int_smask = QSFP2_INT % 64;
9428 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9429 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9430 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9431 * the index of the appropriate CSR in the CCEIntMask CSR array
9433 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9434 (8 * (QSFP1_INT / 64)));
9436 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9437 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9440 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9441 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9445 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9446 /* Clear current status to avoid spurious interrupts */
9447 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9449 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9452 set_qsfp_int_n(ppd, 0);
9454 /* Handle active low nature of INT_N and MODPRST_N pins */
9455 if (qsfp_mod_present(ppd))
9456 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9458 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9463 * Do a one-time initialize of the LCB block.
9465 static void init_lcb(struct hfi1_devdata *dd)
9467 /* simulator does not correctly handle LCB cclk loopback, skip */
9468 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9471 /* the DC has been reset earlier in the driver load */
9473 /* set LCB for cclk loopback on the port */
9474 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9475 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9476 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9477 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9478 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9479 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9480 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9484 * Perform a test read on the QSFP. Return 0 on success, -ERRNO on error.
9487 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9492 /* report success if not a QSFP */
9493 if (ppd->port_type != PORT_TYPE_QSFP)
9496 /* read byte 2, the status byte */
9497 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9503 return 0; /* success */
9507 * Values for QSFP retry.
9509 * Give up after 10s (20 x 500ms). The overall timeout was empirically
9510 * arrived at from experience on a large cluster.
9512 #define MAX_QSFP_RETRIES 20
9513 #define QSFP_RETRY_WAIT 500 /* msec */
9516 * Try a QSFP read. If it fails, schedule a retry for later.
9517 * Called on first link activation after driver load.
9519 static void try_start_link(struct hfi1_pportdata *ppd)
9521 if (test_qsfp_read(ppd)) {
9523 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9524 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9527 dd_dev_info(ppd->dd,
9528 "QSFP not responding, waiting and retrying %d\n",
9529 (int)ppd->qsfp_retry_count);
9530 ppd->qsfp_retry_count++;
9531 queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
9532 msecs_to_jiffies(QSFP_RETRY_WAIT));
9535 ppd->qsfp_retry_count = 0;
9541 * Workqueue function to start the link after a delay.
9543 void handle_start_link(struct work_struct *work)
9545 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9546 start_link_work.work);
9547 try_start_link(ppd);
9550 int bringup_serdes(struct hfi1_pportdata *ppd)
9552 struct hfi1_devdata *dd = ppd->dd;
9556 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9557 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9562 guid = dd->base_guid + ppd->port - 1;
9566 /* Set linkinit_reason on power up per OPA spec */
9567 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9569 /* one-time init of the LCB */
9573 ret = init_loopback(dd);
9579 if (ppd->port_type == PORT_TYPE_QSFP) {
9580 set_qsfp_int_n(ppd, 0);
9581 wait_for_qsfp_init(ppd);
9582 set_qsfp_int_n(ppd, 1);
9585 try_start_link(ppd);
9589 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9591 struct hfi1_devdata *dd = ppd->dd;
9594 * Shut down the link and keep it down. First, clear the flag that
9595 * says the driver wants to allow the link to be up (driver_link_ready).
9596 * Then make sure the link is not automatically restarted
9597 * (link_enabled). Cancel any pending restart. And finally
9598 * go offline.
9600 ppd->driver_link_ready = 0;
9601 ppd->link_enabled = 0;
9603 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9604 flush_delayed_work(&ppd->start_link_work);
9605 cancel_delayed_work_sync(&ppd->start_link_work);
9607 ppd->offline_disabled_reason =
9608 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9609 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9610 OPA_LINKDOWN_REASON_SMA_DISABLED);
9611 set_link_state(ppd, HLS_DN_OFFLINE);
9613 /* disable the port */
9614 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9617 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9619 struct hfi1_pportdata *ppd;
9622 ppd = (struct hfi1_pportdata *)(dd + 1);
9623 for (i = 0; i < dd->num_pports; i++, ppd++) {
9624 ppd->ibport_data.rvp.rc_acks = NULL;
9625 ppd->ibport_data.rvp.rc_qacks = NULL;
9626 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9627 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9628 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9629 if (!ppd->ibport_data.rvp.rc_acks ||
9630 !ppd->ibport_data.rvp.rc_delayed_comp ||
9631 !ppd->ibport_data.rvp.rc_qacks)
9638 static const char * const pt_names[] = {
9644 static const char *pt_name(u32 type)
9646 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9650 * index is the index into the receive array
9652 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9653 u32 type, unsigned long pa, u16 order)
9656 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9657 (dd->kregbase + RCV_ARRAY));
9659 if (!(dd->flags & HFI1_PRESENT))
9662 if (type == PT_INVALID) {
9664 } else if (type > PT_INVALID) {
9666 "unexpected receive array type %u for index %u, not handled\n",
9671 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9672 pt_name(type), index, pa, (unsigned long)order);
9674 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9675 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9676 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9677 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9678 << RCV_ARRAY_RT_ADDR_SHIFT;
9679 writeq(reg, base + (index * 8));
9681 if (type == PT_EAGER)
9683 * Eager entries are written one-by-one so we have to push them
9684 * after we write the entry.
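/*
 * Illustrative RcvArray packing, with hypothetical values: a 64 KB
 * eager buffer (order encoding 5 per the buffer size table further
 * below) at physical address 0x12345000 yields write-enable set,
 * BUF_SIZE = 5, and an address field of pa >> 12 = 0x12345, i.e. the
 * 4 KB-aligned frame number.
 */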
9691 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9693 struct hfi1_devdata *dd = rcd->dd;
9696 /* this could be optimized */
9697 for (i = rcd->eager_base; i < rcd->eager_base +
9698 rcd->egrbufs.alloced; i++)
9699 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9701 for (i = rcd->expected_base;
9702 i < rcd->expected_base + rcd->expected_count; i++)
9703 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9706 struct ib_header *hfi1_get_msgheader(
9707 struct hfi1_devdata *dd, __le32 *rhf_addr)
9709 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9711 return (struct ib_header *)
9712 (rhf_addr - dd->rhf_offset + offset);
9715 static const char * const ib_cfg_name_strings[] = {
9716 "HFI1_IB_CFG_LIDLMC",
9717 "HFI1_IB_CFG_LWID_DG_ENB",
9718 "HFI1_IB_CFG_LWID_ENB",
9720 "HFI1_IB_CFG_SPD_ENB",
9722 "HFI1_IB_CFG_RXPOL_ENB",
9723 "HFI1_IB_CFG_LREV_ENB",
9724 "HFI1_IB_CFG_LINKLATENCY",
9725 "HFI1_IB_CFG_HRTBT",
9726 "HFI1_IB_CFG_OP_VLS",
9727 "HFI1_IB_CFG_VL_HIGH_CAP",
9728 "HFI1_IB_CFG_VL_LOW_CAP",
9729 "HFI1_IB_CFG_OVERRUN_THRESH",
9730 "HFI1_IB_CFG_PHYERR_THRESH",
9731 "HFI1_IB_CFG_LINKDEFAULT",
9732 "HFI1_IB_CFG_PKEYS",
9734 "HFI1_IB_CFG_LSTATE",
9735 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9736 "HFI1_IB_CFG_PMA_TICKS",
9740 static const char *ib_cfg_name(int which)
9742 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9744 return ib_cfg_name_strings[which];
9747 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9749 struct hfi1_devdata *dd = ppd->dd;
9753 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9754 val = ppd->link_width_enabled;
9756 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9757 val = ppd->link_width_active;
9759 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9760 val = ppd->link_speed_enabled;
9762 case HFI1_IB_CFG_SPD: /* current Link speed */
9763 val = ppd->link_speed_active;
9766 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9767 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9768 case HFI1_IB_CFG_LINKLATENCY:
9771 case HFI1_IB_CFG_OP_VLS:
9772 val = ppd->vls_operational;
9774 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9775 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9777 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9778 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9780 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9781 val = ppd->overrun_threshold;
9783 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9784 val = ppd->phy_error_threshold;
9786 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9787 val = dd->link_default;
9790 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9791 case HFI1_IB_CFG_PMA_TICKS:
9794 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9797 "%s: which %s: not implemented\n",
9799 ib_cfg_name(which));
9807 * The largest MAD packet size.
9809 #define MAX_MAD_PACKET 2048
9812 * Return the maximum header bytes that can go on the _wire_
9813 * for this device. This count includes the ICRC which is
9814 * not part of the packet held in memory but is appended by the
9815 * hardware.
9816 * This is dependent on the device's receive header entry size.
9817 * HFI allows this to be set per-receive context, but the
9818 * driver presently enforces a global value.
9820 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9823 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9824 * the Receive Header Entry Size minus the PBC (or RHF) size
9825 * plus one DW for the ICRC appended by HW.
9827 * dd->rcd[0].rcvhdrqentsize is in DW.
9828 * We use rcd[0] as all contexts will have the same value. Also,
9829 * the first kernel context would have been allocated by now so
9830 * we are guaranteed a valid value.
9832 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
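/*
 * Worked example with a hypothetical entry size: for a receive header
 * queue entry size of 32 DWs, the maximum wire header is
 * (32 - 2 + 1) << 2 = 124 bytes: 30 DWs of header after dropping the
 * PBC/RHF, plus 1 DW of ICRC, converted from DWs to bytes.
 */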
9837 * @ppd: per port data
9839 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9840 * registers compare against LRH.PktLen, so use the max bytes included
9841 * in the LRH.
9843 * This routine changes all VL values except VL15, which it maintains at
9844 * the same value.
9846 static void set_send_length(struct hfi1_pportdata *ppd)
9848 struct hfi1_devdata *dd = ppd->dd;
9849 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9850 u32 maxvlmtu = dd->vld[15].mtu;
9851 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9852 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9853 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9857 for (i = 0; i < ppd->vls_supported; i++) {
9858 if (dd->vld[i].mtu > maxvlmtu)
9859 maxvlmtu = dd->vld[i].mtu;
9861 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9862 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9863 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9865 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9866 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9867 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9869 write_csr(dd, SEND_LEN_CHECK0, len1);
9870 write_csr(dd, SEND_LEN_CHECK1, len2);
9871 /* adjust kernel credit return thresholds based on new MTUs */
9872 /* all kernel receive contexts have the same hdrqentsize */
9873 for (i = 0; i < ppd->vls_supported; i++) {
9874 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9875 sc_mtu_to_threshold(dd->vld[i].sc,
9877 dd->rcd[0]->rcvhdrqentsize));
9878 for (j = 0; j < INIT_SC_PER_VL; j++)
9879 sc_set_cr_threshold(
9880 pio_select_send_context_vl(dd, j, i),
9883 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9884 sc_mtu_to_threshold(dd->vld[15].sc,
9886 dd->rcd[0]->rcvhdrqentsize));
9887 sc_set_cr_threshold(dd->vld[15].sc, thres);
9889 /* Adjust maximum MTU for the port in DC */
9890 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9891 (ilog2(maxvlmtu >> 8) + 1);
9892 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9893 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9894 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9895 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9896 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
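/*
 * Example of the DC MTU cap encoding above, for a hypothetical MTU:
 * maxvlmtu = 8192 gives dcmtu = ilog2(8192 >> 8) + 1 = ilog2(32) + 1
 * = 6. Only 10240 falls outside the power-of-two formula and uses the
 * dedicated DCC_CFG_PORT_MTU_CAP_10240 encoding.
 */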
9899 static void set_lidlmc(struct hfi1_pportdata *ppd)
9903 struct hfi1_devdata *dd = ppd->dd;
9904 u32 mask = ~((1U << ppd->lmc) - 1);
9905 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9907 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9908 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9909 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9910 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9911 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9912 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9913 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9916 * Iterate over all the send contexts and set their SLID check
9918 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9919 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9920 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9921 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9923 for (i = 0; i < dd->chip_send_contexts; i++) {
9924 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9926 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9929 /* Now we have to do the same thing for the sdma engines */
9930 sdma_update_lmc(dd, mask, ppd->lid);
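/*
 * LMC mask example, with a hypothetical LID/LMC: lid = 0x1000 and
 * lmc = 2 give mask = ~((1 << 2) - 1) = 0xfffffffc, so the SLID/DLID
 * checks ignore the low two bits and the port answers to LIDs
 * 0x1000 through 0x1003 inclusive.
 */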
9933 static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9935 unsigned long timeout;
9938 timeout = jiffies + msecs_to_jiffies(msecs);
9940 curr_state = read_physical_state(dd);
9941 if (curr_state == state)
9943 if (time_after(jiffies, timeout)) {
9945 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9949 usleep_range(1950, 2050); /* sleep 2ms-ish */
9955 static const char *state_completed_string(u32 completed)
9957 static const char * const state_completed[] = {
9963 if (completed < ARRAY_SIZE(state_completed))
9964 return state_completed[completed];
9969 static const char all_lanes_dead_timeout_expired[] =
9970 "All lanes were inactive – was the interconnect media removed?";
9971 static const char tx_out_of_policy[] =
9972 "Passing lanes on local port do not meet the local link width policy";
9973 static const char no_state_complete[] =
9974 "State timeout occurred before link partner completed the state";
9975 static const char * const state_complete_reasons[] = {
9976 [0x00] = "Reason unknown",
9977 [0x01] = "Link was halted by driver, refer to LinkDownReason",
9978 [0x02] = "Link partner reported failure",
9979 [0x10] = "Unable to achieve frame sync on any lane",
9981 "Unable to find a common bit rate with the link partner",
9983 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
9985 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
9986 [0x14] = no_state_complete,
9988 "State timeout occurred before link partner identified equalization presets",
9990 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
9991 [0x17] = tx_out_of_policy,
9992 [0x20] = all_lanes_dead_timeout_expired,
9994 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
9995 [0x22] = no_state_complete,
9997 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
9998 [0x24] = tx_out_of_policy,
9999 [0x30] = all_lanes_dead_timeout_expired,
10001 "State timeout occurred waiting for host to process received frames",
10002 [0x32] = no_state_complete,
10004 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10005 [0x34] = tx_out_of_policy,
10008 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10011 const char *str = NULL;
10013 if (code < ARRAY_SIZE(state_complete_reasons))
10014 str = state_complete_reasons[code];
10021 /* describe the given last state complete frame */
10022 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10023 const char *prefix)
10025 struct hfi1_devdata *dd = ppd->dd;
10033 * [ 0: 0] - success
10034 * [ 3: 1] - state
10035 * [ 7: 4] - next state timeout
10036 * [15: 8] - reason code
10037 * [31:16] - lane mask
10039 success = frame & 0x1;
10040 state = (frame >> 1) & 0x7;
10041 reason = (frame >> 8) & 0xff;
10042 lanes = (frame >> 16) & 0xffff;
10044 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10046 dd_dev_err(dd, " last reported state: %s (0x%x)\n",
10047 state_completed_string(state), state);
10048 dd_dev_err(dd, " state successfully completed: %s\n",
10049 success ? "yes" : "no");
10050 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10051 reason, state_complete_reason_code_string(ppd, reason));
10052 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
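/*
 * Decode example for a hypothetical frame: frame = 0x000f0403 gives
 * success = 1 (bit 0), state = (frame >> 1) & 0x7 = 1,
 * reason = (frame >> 8) & 0xff = 0x04, and
 * lanes = (frame >> 16) & 0xffff = 0x000f, i.e. a successfully
 * completed state with lanes 0-3 passing.
 */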
10056 * Read the last state complete frames and explain them. This routine
10057 * expects to be called if the link went down during link negotiation
10058 * and initialization (LNI). That is, anywhere between polling and link up.
10060 static void check_lni_states(struct hfi1_pportdata *ppd)
10062 u32 last_local_state;
10063 u32 last_remote_state;
10065 read_last_local_state(ppd->dd, &last_local_state);
10066 read_last_remote_state(ppd->dd, &last_remote_state);
10069 * Don't report anything if there is nothing to report. A value of
10070 * 0 means the link was taken down while polling and there was no
10071 * training in progress.
10073 if (last_local_state == 0 && last_remote_state == 0)
10076 decode_state_complete(ppd, last_local_state, "transmitted");
10077 decode_state_complete(ppd, last_remote_state, "received");
10081 * Helper for set_link_state(). Do not call except from that routine.
10082 * Expects ppd->hls_lock to be held.
10084 * @rem_reason value to be sent to the neighbor
10086 * LinkDownReasons only set if transition succeeds.
10088 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10090 struct hfi1_devdata *dd = ppd->dd;
10091 u32 pstate, previous_state;
10096 previous_state = ppd->host_link_state;
10097 ppd->host_link_state = HLS_GOING_OFFLINE;
10098 pstate = read_physical_state(dd);
10099 if (pstate == PLS_OFFLINE) {
10100 do_transition = 0; /* in right state */
10101 do_wait = 0; /* ...no need to wait */
10102 } else if ((pstate & 0xff) == PLS_OFFLINE) {
10103 do_transition = 0; /* in an offline transient state */
10104 do_wait = 1; /* ...wait for it to settle */
10106 do_transition = 1; /* need to move to offline */
10107 do_wait = 1; /* ...will need to wait */
10110 if (do_transition) {
10111 ret = set_physical_link_state(dd,
10112 (rem_reason << 8) | PLS_OFFLINE);
10114 if (ret != HCMD_SUCCESS) {
10116 "Failed to transition to Offline link state, return %d\n",
10120 if (ppd->offline_disabled_reason ==
10121 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10122 ppd->offline_disabled_reason =
10123 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10127 /* it can take a while for the link to go down */
10128 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
10133 /* make sure the logical state is also down */
10134 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10137 * Now in charge of LCB - must be after the physical state is
10138 * offline.quiet and before host_link_state is changed.
10140 set_host_lcb_access(dd);
10141 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10142 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10144 if (ppd->port_type == PORT_TYPE_QSFP &&
10145 ppd->qsfp_info.limiting_active &&
10146 qsfp_mod_present(ppd)) {
10149 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10151 set_qsfp_tx(ppd, 0);
10152 release_chip_resource(dd, qsfp_resource(dd));
10154 /* not fatal, but should warn */
10156 "Unable to acquire lock to turn off QSFP TX\n");
10161 * The LNI has a mandatory wait time after the physical state
10162 * moves to Offline.Quiet. The wait time may be different
10163 * depending on how the link went down. The 8051 firmware
10164 * will observe the needed wait time and only move to ready
10165 * when that is completed. The largest of the quiet timeouts
10166 * is 6s, so wait that long and then at least 0.5s more for
10167 * other transitions, and another 0.5s for a buffer.
10169 ret = wait_fm_ready(dd, 7000);
10172 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10173 /* state is really offline, so make it so */
10174 ppd->host_link_state = HLS_DN_OFFLINE;
10179 * The state is now offline and the 8051 is ready to accept host
10180 * requests.
10181 * - change our state
10182 * - notify others if we were previously in a linkup state
10184 ppd->host_link_state = HLS_DN_OFFLINE;
10185 if (previous_state & HLS_UP) {
10186 /* went down while link was up */
10187 handle_linkup_change(dd, 0);
10188 } else if (previous_state
10189 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10190 /* went down while attempting link up */
10191 check_lni_states(ppd);
10194 /* the active link width (downgrade) is 0 on link down */
10195 ppd->link_width_active = 0;
10196 ppd->link_width_downgrade_tx_active = 0;
10197 ppd->link_width_downgrade_rx_active = 0;
10198 ppd->current_egress_rate = 0;
10202 /* return the link state name */
10203 static const char *link_state_name(u32 state)
10206 int n = ilog2(state);
10207 static const char * const names[] = {
10208 [__HLS_UP_INIT_BP] = "INIT",
10209 [__HLS_UP_ARMED_BP] = "ARMED",
10210 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10211 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10212 [__HLS_DN_POLL_BP] = "POLL",
10213 [__HLS_DN_DISABLE_BP] = "DISABLE",
10214 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10215 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10216 [__HLS_GOING_UP_BP] = "GOING_UP",
10217 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10218 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10221 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10222 return name ? name : "unknown";
10225 /* return the link state reason name */
10226 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10228 if (state == HLS_UP_INIT) {
10229 switch (ppd->linkinit_reason) {
10230 case OPA_LINKINIT_REASON_LINKUP:
10232 case OPA_LINKINIT_REASON_FLAPPING:
10233 return "(FLAPPING)";
10234 case OPA_LINKINIT_OUTSIDE_POLICY:
10235 return "(OUTSIDE_POLICY)";
10236 case OPA_LINKINIT_QUARANTINED:
10237 return "(QUARANTINED)";
10238 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10239 return "(INSUFIC_CAPABILITY)";
10248 * driver_physical_state - convert the driver's notion of a port's
10249 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10250 * Return -1 (converted to a u32) to indicate error.
10252 u32 driver_physical_state(struct hfi1_pportdata *ppd)
10254 switch (ppd->host_link_state) {
10257 case HLS_UP_ACTIVE:
10258 return IB_PORTPHYSSTATE_LINKUP;
10260 return IB_PORTPHYSSTATE_POLLING;
10261 case HLS_DN_DISABLE:
10262 return IB_PORTPHYSSTATE_DISABLED;
10263 case HLS_DN_OFFLINE:
10264 return OPA_PORTPHYSSTATE_OFFLINE;
10265 case HLS_VERIFY_CAP:
10266 return IB_PORTPHYSSTATE_POLLING;
10268 return IB_PORTPHYSSTATE_POLLING;
10269 case HLS_GOING_OFFLINE:
10270 return OPA_PORTPHYSSTATE_OFFLINE;
10271 case HLS_LINK_COOLDOWN:
10272 return OPA_PORTPHYSSTATE_OFFLINE;
10273 case HLS_DN_DOWNDEF:
10275 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10276 ppd->host_link_state);
10282 * driver_logical_state - convert the driver's notion of a port's
10283 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10284 * (converted to a u32) to indicate error.
10286 u32 driver_logical_state(struct hfi1_pportdata *ppd)
10288 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10289 return IB_PORT_DOWN;
10291 switch (ppd->host_link_state & HLS_UP) {
10293 return IB_PORT_INIT;
10295 return IB_PORT_ARMED;
10296 case HLS_UP_ACTIVE:
10297 return IB_PORT_ACTIVE;
10299 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10300 ppd->host_link_state);
10305 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10306 u8 neigh_reason, u8 rem_reason)
10308 if (ppd->local_link_down_reason.latest == 0 &&
10309 ppd->neigh_link_down_reason.latest == 0) {
10310 ppd->local_link_down_reason.latest = lcl_reason;
10311 ppd->neigh_link_down_reason.latest = neigh_reason;
10312 ppd->remote_link_down_reason = rem_reason;
10317 * Change the physical and/or logical link state.
10319 * Do not call this routine while inside an interrupt. It contains
10320 * calls to routines that can take multiple seconds to finish.
10322 * Returns 0 on success, -errno on failure.
10324 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10326 struct hfi1_devdata *dd = ppd->dd;
10327 struct ib_event event = {.device = NULL};
10329 int orig_new_state, poll_bounce;
10331 mutex_lock(&ppd->hls_lock);
10333 orig_new_state = state;
10334 if (state == HLS_DN_DOWNDEF)
10335 state = dd->link_default;
10337 /* interpret poll -> poll as a link bounce */
10338 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10339 state == HLS_DN_POLL;
10341 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10342 link_state_name(ppd->host_link_state),
10343 link_state_name(orig_new_state),
10344 poll_bounce ? "(bounce) " : "",
10345 link_state_reason_name(ppd, state));
10348 * If we're going to a (HLS_*) link state that implies the logical
10349 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10350 * reset is_sm_config_started to 0.
10352 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10353 ppd->is_sm_config_started = 0;
10356 * Do nothing if the states match. Let a poll to poll link bounce
10357 * go through.
10359 if (ppd->host_link_state == state && !poll_bounce)
10364 if (ppd->host_link_state == HLS_DN_POLL &&
10365 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10367 * Quick link up jumps from polling to here.
10369 * Whether in normal or loopback mode, the
10370 * simulator jumps from polling to link up.
10371 * Accept that here.
10374 } else if (ppd->host_link_state != HLS_GOING_UP) {
10378 ppd->host_link_state = HLS_UP_INIT;
10379 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10381 /* logical state didn't change, stay at going_up */
10382 ppd->host_link_state = HLS_GOING_UP;
10384 "%s: logical state did not change to INIT\n",
10387 /* clear old transient LINKINIT_REASON code */
10388 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10389 ppd->linkinit_reason =
10390 OPA_LINKINIT_REASON_LINKUP;
10392 /* enable the port */
10393 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10395 handle_linkup_change(dd, 1);
10399 if (ppd->host_link_state != HLS_UP_INIT)
10402 ppd->host_link_state = HLS_UP_ARMED;
10403 set_logical_state(dd, LSTATE_ARMED);
10404 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10406 /* logical state didn't change, stay at init */
10407 ppd->host_link_state = HLS_UP_INIT;
10409 "%s: logical state did not change to ARMED\n",
10413 * The simulator does not currently implement SMA messages,
10414 * so neighbor_normal is not set. Set it here when we first
10415 * move to Armed.
10417 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10418 ppd->neighbor_normal = 1;
10420 case HLS_UP_ACTIVE:
10421 if (ppd->host_link_state != HLS_UP_ARMED)
10424 ppd->host_link_state = HLS_UP_ACTIVE;
10425 set_logical_state(dd, LSTATE_ACTIVE);
10426 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10428 /* logical state didn't change, stay at armed */
10429 ppd->host_link_state = HLS_UP_ARMED;
10431 "%s: logical state did not change to ACTIVE\n",
10434 /* tell all engines to go running */
10435 sdma_all_running(dd);
10437 /* Signal the IB layer that the port has gone active */
10438 event.device = &dd->verbs_dev.rdi.ibdev;
10439 event.element.port_num = ppd->port;
10440 event.event = IB_EVENT_PORT_ACTIVE;
10444 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10445 ppd->host_link_state == HLS_DN_OFFLINE) &&
10448 /* Hand LED control to the DC */
10449 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10451 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10452 u8 tmp = ppd->link_enabled;
10454 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10456 ppd->link_enabled = tmp;
10459 ppd->remote_link_down_reason = 0;
10461 if (ppd->driver_link_ready)
10462 ppd->link_enabled = 1;
10465 set_all_slowpath(ppd->dd);
10466 ret = set_local_link_attributes(ppd);
10470 ppd->port_error_action = 0;
10471 ppd->host_link_state = HLS_DN_POLL;
10473 if (quick_linkup) {
10474 /* quick linkup does not go into polling */
10475 ret = do_quick_linkup(dd);
10477 ret1 = set_physical_link_state(dd, PLS_POLLING);
10478 if (ret1 != HCMD_SUCCESS) {
10480 "Failed to transition to Polling link state, return 0x%x\n",
10485 ppd->offline_disabled_reason =
10486 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10488 * If an error occurred above, go back to offline. The
10489 * caller may reschedule another attempt.
10492 goto_offline(ppd, 0);
10494 case HLS_DN_DISABLE:
10495 /* link is disabled */
10496 ppd->link_enabled = 0;
10498 /* allow any state to transition to disabled */
10500 /* must transition to offline first */
10501 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10502 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10505 ppd->remote_link_down_reason = 0;
10508 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10509 if (ret1 != HCMD_SUCCESS) {
10511 "Failed to transition to Disabled link state, return 0x%x\n",
10516 ppd->host_link_state = HLS_DN_DISABLE;
10519 case HLS_DN_OFFLINE:
10520 if (ppd->host_link_state == HLS_DN_DISABLE)
10523 /* allow any state to transition to offline */
10524 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10526 ppd->remote_link_down_reason = 0;
10528 case HLS_VERIFY_CAP:
10529 if (ppd->host_link_state != HLS_DN_POLL)
10531 ppd->host_link_state = HLS_VERIFY_CAP;
10534 if (ppd->host_link_state != HLS_VERIFY_CAP)
10537 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10538 if (ret1 != HCMD_SUCCESS) {
10540 "Failed to transition to link up state, return 0x%x\n",
10545 ppd->host_link_state = HLS_GOING_UP;
10548 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10549 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10551 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10560 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10561 __func__, link_state_name(ppd->host_link_state),
10562 link_state_name(state));
10566 mutex_unlock(&ppd->hls_lock);
10569 ib_dispatch_event(&event);
10574 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10580 case HFI1_IB_CFG_LIDLMC:
10583 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10585 * The VL Arbitrator high limit is sent in units of 4k
10586 * bytes, while HFI stores it in units of 64 bytes.
10589 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10590 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10591 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10593 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10594 /* HFI only supports POLL as the default link down state */
10595 if (val != HLS_DN_POLL)
10598 case HFI1_IB_CFG_OP_VLS:
10599 if (ppd->vls_operational != val) {
10600 ppd->vls_operational = val;
10606 * For link width, link width downgrade, and speed enable, always AND
10607 * the setting with what is actually supported. This has two benefits.
10608 * First, enabled can't have unsupported values, no matter what the
10609 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10610 * "fill in with your supported value" have all the bits in the
10611 * field set, so simply ANDing with supported has the desired result.
10613 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10614 ppd->link_width_enabled = val & ppd->link_width_supported;
10616 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10617 ppd->link_width_downgrade_enabled =
10618 val & ppd->link_width_downgrade_supported;
10620 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10621 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10623 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10625 * HFI does not follow IB specs, save this value
10626 * so we can report it, if asked.
10628 ppd->overrun_threshold = val;
10630 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10632 * HFI does not follow IB specs, save this value
10633 * so we can report it, if asked.
10635 ppd->phy_error_threshold = val;
10638 case HFI1_IB_CFG_MTU:
10639 set_send_length(ppd);
10642 case HFI1_IB_CFG_PKEYS:
10643 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10644 set_partition_keys(ppd);
10648 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10649 dd_dev_info(ppd->dd,
10650 "%s: which %s, val 0x%x: not implemented\n",
10651 __func__, ib_cfg_name(which), val);
10657 /* begin functions related to vl arbitration table caching */
10658 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10662 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10663 VL_ARB_LOW_PRIO_TABLE_SIZE);
10664 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10665 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10668 * Note that we always return values directly from the
10669 * 'vl_arb_cache' (and do no CSR reads) in response to a
10670 * 'Get(VLArbTable)'. This is obviously correct after a
10671 * 'Set(VLArbTable)', since the cache will then be up to
10672 * date. But it's also correct prior to any 'Set(VLArbTable)'
10673 * since then both the cache, and the relevant h/w registers
10677 for (i = 0; i < MAX_PRIO_TABLE; i++)
10678 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10682 * vl_arb_lock_cache
10684 * All other vl_arb_* functions should be called only after locking
10687 static inline struct vl_arb_cache *
10688 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10690 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10692 spin_lock(&ppd->vl_arb_cache[idx].lock);
10693 return &ppd->vl_arb_cache[idx];
10696 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10698 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10701 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10702 struct ib_vl_weight_elem *vl)
10704 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10707 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10708 struct ib_vl_weight_elem *vl)
10710 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10713 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10714 struct ib_vl_weight_elem *vl)
10716 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10719 /* end functions related to vl arbitration table caching */
10721 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10722 u32 size, struct ib_vl_weight_elem *vl)
10724 struct hfi1_devdata *dd = ppd->dd;
10726 unsigned int i, is_up = 0;
10727 int drain, ret = 0;
10729 mutex_lock(&ppd->hls_lock);
10731 if (ppd->host_link_state & HLS_UP)
10734 drain = !is_ax(dd) && is_up;
10738 * Before adjusting VL arbitration weights, empty per-VL
10739 * FIFOs, otherwise a packet whose VL weight is being
10740 * set to 0 could get stuck in a FIFO with no chance to
10741 * egress.
10743 ret = stop_drain_data_vls(dd);
10748 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10753 for (i = 0; i < size; i++, vl++) {
10755 * NOTE: The low priority shift and mask are used here, but
10756 * they are the same for both the low and high registers.
10758 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10759 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10760 | (((u64)vl->weight
10761 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10762 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10763 write_csr(dd, target + (i * 8), reg);
10765 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10768 open_fill_data_vls(dd); /* reopen all VLs */
10771 mutex_unlock(&ppd->hls_lock);
10777 * Read one credit merge VL register.
10779 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10780 struct vl_limit *vll)
10782 u64 reg = read_csr(dd, csr);
10784 vll->dedicated = cpu_to_be16(
10785 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10786 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10787 vll->shared = cpu_to_be16(
10788 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10789 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10793 * Read the current credit merge limits.
10795 static int get_buffer_control(struct hfi1_devdata *dd,
10796 struct buffer_control *bc, u16 *overall_limit)
10801 /* not all entries are filled in */
10802 memset(bc, 0, sizeof(*bc));
10804 /* OPA and HFI have a 1-1 mapping */
10805 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10806 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10808 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10809 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10811 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10812 bc->overall_shared_limit = cpu_to_be16(
10813 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10814 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10816 *overall_limit = (reg
10817 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10818 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10819 return sizeof(struct buffer_control);
10822 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10827 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10828 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10829 for (i = 0; i < sizeof(u64); i++) {
10830 u8 byte = *(((u8 *)&reg) + i);
10832 dp->vlnt[2 * i] = byte & 0xf;
10833 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10836 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10837 for (i = 0; i < sizeof(u64); i++) {
10838 u8 byte = *(((u8 *)&reg) + i);
10840 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10841 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10843 return sizeof(struct sc2vlnt);
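/*
 * Nibble unpacking example, with a hypothetical register value: if the
 * low byte of DCC_CFG_SC_VL_TABLE_15_0 reads 0x21, SC0 maps to VLnt 1
 * (low nibble) and SC1 maps to VLnt 2 (high nibble).
 */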
10846 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10847 struct ib_vl_weight_elem *vl)
10851 for (i = 0; i < nelems; i++, vl++) {
10857 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10859 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10861 0, dp->vlnt[0] & 0xf,
10862 1, dp->vlnt[1] & 0xf,
10863 2, dp->vlnt[2] & 0xf,
10864 3, dp->vlnt[3] & 0xf,
10865 4, dp->vlnt[4] & 0xf,
10866 5, dp->vlnt[5] & 0xf,
10867 6, dp->vlnt[6] & 0xf,
10868 7, dp->vlnt[7] & 0xf,
10869 8, dp->vlnt[8] & 0xf,
10870 9, dp->vlnt[9] & 0xf,
10871 10, dp->vlnt[10] & 0xf,
10872 11, dp->vlnt[11] & 0xf,
10873 12, dp->vlnt[12] & 0xf,
10874 13, dp->vlnt[13] & 0xf,
10875 14, dp->vlnt[14] & 0xf,
10876 15, dp->vlnt[15] & 0xf));
10877 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10878 DC_SC_VL_VAL(31_16,
10879 16, dp->vlnt[16] & 0xf,
10880 17, dp->vlnt[17] & 0xf,
10881 18, dp->vlnt[18] & 0xf,
10882 19, dp->vlnt[19] & 0xf,
10883 20, dp->vlnt[20] & 0xf,
10884 21, dp->vlnt[21] & 0xf,
10885 22, dp->vlnt[22] & 0xf,
10886 23, dp->vlnt[23] & 0xf,
10887 24, dp->vlnt[24] & 0xf,
10888 25, dp->vlnt[25] & 0xf,
10889 26, dp->vlnt[26] & 0xf,
10890 27, dp->vlnt[27] & 0xf,
10891 28, dp->vlnt[28] & 0xf,
10892 29, dp->vlnt[29] & 0xf,
10893 30, dp->vlnt[30] & 0xf,
10894 31, dp->vlnt[31] & 0xf));
10897 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10901 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10902 what, (int)limit, idx);
10905 /* change only the shared limit portion of SendCmGlobalCredit */
10906 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10910 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10911 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10912 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10913 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10916 /* change only the total credit limit portion of SendCmGlobalCredit */
10917 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10921 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10922 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10923 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10924 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10927 /* set the given per-VL shared limit */
10928 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10933 if (vl < TXE_NUM_DATA_VL)
10934 addr = SEND_CM_CREDIT_VL + (8 * vl);
10936 addr = SEND_CM_CREDIT_VL15;
10938 reg = read_csr(dd, addr);
10939 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10940 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10941 write_csr(dd, addr, reg);
10944 /* set the given per-VL dedicated limit */
10945 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10950 if (vl < TXE_NUM_DATA_VL)
10951 addr = SEND_CM_CREDIT_VL + (8 * vl);
10953 addr = SEND_CM_CREDIT_VL15;
10955 reg = read_csr(dd, addr);
10956 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10957 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10958 write_csr(dd, addr, reg);
10961 /* spin until the given per-VL status mask bits clear */
10962 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10965 unsigned long timeout;
10968 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10970 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10973 return; /* success */
10974 if (time_after(jiffies, timeout))
10975 break; /* timed out */
10980 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10981 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10983 * If this occurs, it is likely there was a credit loss on the link.
10984 * The only recovery from that is a link bounce.
10987 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10991 * The number of credits on the VLs may be changed while everything
10992 * is "live", but the following algorithm must be followed due to
10993 * how the hardware is actually implemented. In particular,
10994 * Return_Credit_Status[] is the only correct status check.
10996 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10997 * set Global_Shared_Credit_Limit = 0
10999 * mask0 = all VLs that are changing either dedicated or shared limits
11000 * set Shared_Limit[mask0] = 0
11001 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11002 * if (changing any dedicated limit)
11003 * mask1 = all VLs that are lowering dedicated limits
11004 * lower Dedicated_Limit[mask1]
11005 * spin until Return_Credit_Status[mask1] == 0
11006 * raise Dedicated_Limits
11007 * raise Shared_Limits
11008 * raise Global_Shared_Credit_Limit
11010 * lower = if the new limit is lower, set the limit to the new value
11011 * raise = if the new limit is higher than the current value (may be changed
11012 * earlier in the algorithm), set the new limit to the new value
11014 int set_buffer_control(struct hfi1_pportdata *ppd,
11015 struct buffer_control *new_bc)
11017 struct hfi1_devdata *dd = ppd->dd;
11018 u64 changing_mask, ld_mask, stat_mask;
11020 int i, use_all_mask;
11021 int this_shared_changing;
11022 int vl_count = 0, ret;
11024 * A0: add the variable any_shared_limit_changing below and in the
11025 * algorithm above. If removing A0 support, it can be removed.
11027 int any_shared_limit_changing;
11028 struct buffer_control cur_bc;
11029 u8 changing[OPA_MAX_VLS];
11030 u8 lowering_dedicated[OPA_MAX_VLS];
11033 const u64 all_mask =
11034 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11035 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11036 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11037 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11038 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11039 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11040 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11041 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11042 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11044 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11045 #define NUM_USABLE_VLS 16 /* look at VL15 and less */
11047 /* find the new total credits, do sanity check on unused VLs */
11048 for (i = 0; i < OPA_MAX_VLS; i++) {
11050 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11053 nonzero_msg(dd, i, "dedicated",
11054 be16_to_cpu(new_bc->vl[i].dedicated));
11055 nonzero_msg(dd, i, "shared",
11056 be16_to_cpu(new_bc->vl[i].shared));
11057 new_bc->vl[i].dedicated = 0;
11058 new_bc->vl[i].shared = 0;
11060 new_total += be16_to_cpu(new_bc->overall_shared_limit);
11062 /* fetch the current values */
11063 get_buffer_control(dd, &cur_bc, &cur_total);
11066 * Create the masks we will use.
11068 memset(changing, 0, sizeof(changing));
11069 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11071 * NOTE: Assumes that the individual VL bits are adjacent and in
11072 * increasing order.
11075 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11079 any_shared_limit_changing = 0;
11080 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11083 this_shared_changing = new_bc->vl[i].shared
11084 != cur_bc.vl[i].shared;
11085 if (this_shared_changing)
11086 any_shared_limit_changing = 1;
11087 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11088 this_shared_changing) {
11090 changing_mask |= stat_mask;
11093 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11094 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11095 lowering_dedicated[i] = 1;
11096 ld_mask |= stat_mask;
11100 /* bracket the credit change with a total adjustment */
11101 if (new_total > cur_total)
11102 set_global_limit(dd, new_total);
11105 * Start the credit change algorithm.
11108 if ((be16_to_cpu(new_bc->overall_shared_limit) <
11109 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11110 (is_ax(dd) && any_shared_limit_changing)) {
11111 set_global_shared(dd, 0);
11112 cur_bc.overall_shared_limit = 0;
11116 for (i = 0; i < NUM_USABLE_VLS; i++) {
11121 set_vl_shared(dd, i, 0);
11122 cur_bc.vl[i].shared = 0;
11126 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11129 if (change_count > 0) {
11130 for (i = 0; i < NUM_USABLE_VLS; i++) {
11134 if (lowering_dedicated[i]) {
11135 set_vl_dedicated(dd, i,
11136 be16_to_cpu(new_bc->
11138 cur_bc.vl[i].dedicated =
11139 new_bc->vl[i].dedicated;
11143 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11145 /* now raise all dedicated that are going up */
11146 for (i = 0; i < NUM_USABLE_VLS; i++) {
11150 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11151 be16_to_cpu(cur_bc.vl[i].dedicated))
11152 set_vl_dedicated(dd, i,
11153 be16_to_cpu(new_bc->
11158 /* next raise all shared that are going up */
11159 for (i = 0; i < NUM_USABLE_VLS; i++) {
11163 if (be16_to_cpu(new_bc->vl[i].shared) >
11164 be16_to_cpu(cur_bc.vl[i].shared))
11165 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11168 /* finally raise the global shared */
11169 if (be16_to_cpu(new_bc->overall_shared_limit) >
11170 be16_to_cpu(cur_bc.overall_shared_limit))
11171 set_global_shared(dd,
11172 be16_to_cpu(new_bc->overall_shared_limit));
11174 /* bracket the credit change with a total adjustment */
11175 if (new_total < cur_total)
11176 set_global_limit(dd, new_total);
11179 * Determine the actual number of operational VLs using the number of
11180 * dedicated and shared credits for each VL.
11182 if (change_count > 0) {
11183 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11184 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11185 be16_to_cpu(new_bc->vl[i].shared) > 0)
11187 ppd->actual_vls_operational = vl_count;
11188 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11189 ppd->actual_vls_operational :
11190 ppd->vls_operational,
11193 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11194 ppd->actual_vls_operational :
11195 ppd->vls_operational, NULL);
11203 * Read the given fabric manager table. Return the size of the
11204 * table (in bytes) on success, and a negative error code on
11205 * failure.
11207 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11211 struct vl_arb_cache *vlc;
11214 case FM_TBL_VL_HIGH_ARB:
11217 * OPA specifies 128 elements (of 2 bytes each), though
11218 * HFI supports only 16 elements in h/w.
11220 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11221 vl_arb_get_cache(vlc, t);
11222 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11224 case FM_TBL_VL_LOW_ARB:
11227 * OPA specifies 128 elements (of 2 bytes each), though
11228 * HFI supports only 16 elements in h/w.
11230 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11231 vl_arb_get_cache(vlc, t);
11232 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11234 case FM_TBL_BUFFER_CONTROL:
11235 size = get_buffer_control(ppd->dd, t, NULL);
11237 case FM_TBL_SC2VLNT:
11238 size = get_sc2vlnt(ppd->dd, t);
11240 case FM_TBL_VL_PREEMPT_ELEMS:
11242 /* OPA specifies 128 elements, of 2 bytes each */
11243 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11245 case FM_TBL_VL_PREEMPT_MATRIX:
11248 * OPA specifies that this is the same size as the VL
11249 * arbitration tables (i.e., 256 bytes).
11259 * Write the given fabric manager table.
11261 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11264 struct vl_arb_cache *vlc;
11267 case FM_TBL_VL_HIGH_ARB:
11268 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11269 if (vl_arb_match_cache(vlc, t)) {
11270 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11273 vl_arb_set_cache(vlc, t);
11274 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11275 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11276 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11278 case FM_TBL_VL_LOW_ARB:
11279 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11280 if (vl_arb_match_cache(vlc, t)) {
11281 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11284 vl_arb_set_cache(vlc, t);
11285 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11286 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11287 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11289 case FM_TBL_BUFFER_CONTROL:
11290 ret = set_buffer_control(ppd, t);
11292 case FM_TBL_SC2VLNT:
11293 set_sc2vlnt(ppd->dd, t);
11302 * Disable all data VLs.
11304 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11306 static int disable_data_vls(struct hfi1_devdata *dd)
11311 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11317 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11318 * Just re-enables all data VLs (the "fill" part happens
11319 * automatically - the name was chosen for symmetry with
11320 * stop_drain_data_vls()).
11322 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11324 int open_fill_data_vls(struct hfi1_devdata *dd)
11329 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11335 * drain_data_vls() - assumes that disable_data_vls() has been called,
11336 * waits for the occupancy of the per-VL FIFOs (for all contexts) and of
11337 * the SDMA engines to drop to 0.
11339 static void drain_data_vls(struct hfi1_devdata *dd)
11343 pause_for_credit_return(dd);
11347 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11349 * Use open_fill_data_vls() to resume using data VLs. This pair is
11350 * meant to be used like this:
11352 * stop_drain_data_vls(dd);
11353 * // do things with per-VL resources
11354 * open_fill_data_vls(dd);
11356 int stop_drain_data_vls(struct hfi1_devdata *dd)
11360 ret = disable_data_vls(dd);
11362 drain_data_vls(dd);
11368 * Convert a nanosecond time to a cclock count. No matter how slow
11369 * the cclock, a non-zero ns will always have a non-zero result.
11371 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11375 if (dd->icode == ICODE_FPGA_EMULATION)
11376 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11377 else /* simulation pretends to be ASIC */
11378 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11379 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11380 cclocks = 1;
11381 return cclocks;
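/*
 * Conversion example, with a hypothetical cclock period: if
 * ASIC_CCLOCK_PS were 1250 ps, then 3000 ns -> (3000 * 1000) / 1250 =
 * 2400 cclocks, while 1 ns -> 1000 / 1250 = 0, which the guard above
 * bumps to 1 so a nonzero request never becomes a zero count.
 */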
11385 * Convert a cclock count to nanoseconds. No matter how slow
11386 * the cclock, a non-zero cclocks will always have a non-zero result.
11388 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11392 if (dd->icode == ICODE_FPGA_EMULATION)
11393 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11394 else /* simulation pretends to be ASIC */
11395 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11396 if (cclocks && !ns)
11397 ns = 1;
11398 return ns;
11402 * Dynamically adjust the receive interrupt timeout for a context based on
11403 * incoming packet rate.
11405 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11407 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11409 struct hfi1_devdata *dd = rcd->dd;
11410 u32 timeout = rcd->rcvavail_timeout;
11413 * This algorithm doubles or halves the timeout depending on whether
11414 * the number of packets received in this interrupt was less than or
11415 * greater than or equal to the interrupt count.
11417 * The calculations below do not allow a steady state to be achieved.
11418 * Only at the endpoints is it possible to have an unchanging
11419 * timeout.
11421 if (npkts < rcv_intr_count) {
11423 * Not enough packets arrived before the timeout, adjust
11424 * timeout downward.
11426 if (timeout < 2) /* already at minimum? */
11431 * More than enough packets arrived before the timeout, adjust
11434 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11436 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11439 rcd->rcvavail_timeout = timeout;
11441 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11442 * been verified to be in range
11444 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11445 (u64)timeout <<
11446 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
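/*
 * Adjustment trace, with hypothetical values: with rcv_intr_count = 16
 * and a current timeout of 840, an interrupt that saw 4 packets halves
 * the timeout to 420, while one that saw 32 packets doubles it to
 * 1680, capped at dd->rcv_intr_timeout_csr.
 */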
11449 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11450 u32 intr_adjust, u32 npkts)
11452 struct hfi1_devdata *dd = rcd->dd;
11454 u32 ctxt = rcd->ctxt;
11457 * Need to write timeout register before updating RcvHdrHead to ensure
11458 * that a new value is used when the HW decides to restart counting.
11461 adjust_rcv_timeout(rcd, npkts);
11463 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11464 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11465 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11468 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11469 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11470 << RCV_HDR_HEAD_HEAD_SHIFT);
11471 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11475 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11479 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11480 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11482 if (rcd->rcvhdrtail_kvaddr)
11483 tail = get_rcvhdrtail(rcd);
11485 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11487 return head == tail;
11491 * Context Control and Receive Array encoding for buffer size:
11492 * 0x0 invalid
11493 * 0x1 4 KB
11494 * 0x2 8 KB
11495 * 0x3 16 KB
11496 * 0x4 32 KB
11497 * 0x5 64 KB
11498 * 0x6 128 KB
11499 * 0x7 256 KB
11500 * 0x8 512 KB (Receive Array only)
11501 * 0x9 1 MB (Receive Array only)
11502 * 0xa 2 MB (Receive Array only)
11504 * 0xB-0xF - reserved (Receive Array only)
11507 * This routine assumes that the value has already been sanity checked.
11509 static u32 encoded_size(u32 size)
11512 case 4 * 1024: return 0x1;
11513 case 8 * 1024: return 0x2;
11514 case 16 * 1024: return 0x3;
11515 case 32 * 1024: return 0x4;
11516 case 64 * 1024: return 0x5;
11517 case 128 * 1024: return 0x6;
11518 case 256 * 1024: return 0x7;
11519 case 512 * 1024: return 0x8;
11520 case 1 * 1024 * 1024: return 0x9;
11521 case 2 * 1024 * 1024: return 0xa;
11523 return 0x1; /* if invalid, go with the minimum size */
11526 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11528 struct hfi1_ctxtdata *rcd;
11530 int did_enable = 0;
11532 rcd = dd->rcd[ctxt];
11536 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11538 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11539 /* if the context is already enabled, don't do the extra steps */
11540 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11541 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11542 /* reset the tail and hdr addresses, and sequence count */
11543 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11545 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11546 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11547 rcd->rcvhdrqtailaddr_dma);
11550 /* reset the cached receive header queue head value */
11554 * Zero the receive header queue so we don't get false
11555 * positives when checking the sequence number. The
11556 * sequence numbers could land exactly on the same spot.
11557 * E.g. a rcd restart before the receive header wrapped.
11559 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11561 /* starting timeout */
11562 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11564 /* enable the context */
11565 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11567 /* clean the egr buffer size first */
11568 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11569 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11570 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11571 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11573 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11574 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11577 /* zero RcvEgrIndexHead */
11578 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11580 /* set eager count and base index */
11581 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11582 & RCV_EGR_CTRL_EGR_CNT_MASK)
11583 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11584 (((rcd->eager_base >> RCV_SHIFT)
11585 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11586 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11587 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11590 * Set TID (expected) count and base index.
11591 * rcd->expected_count is set to individual RcvArray entries,
11592 * not pairs, and the CSR takes a pair-count in groups of
11593 * four, so divide by 8.
11595 reg = (((rcd->expected_count >> RCV_SHIFT)
11596 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11597 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11598 (((rcd->expected_base >> RCV_SHIFT)
11599 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11600 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11601 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
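/*
 * Example of the divide-by-8 above, with a hypothetical count:
 * expected_count = 2048 individual RcvArray entries -> 2048 >> 3 = 256
 * written to the CSR, since 2 entries/pair * 4 pairs/group = 8
 * entries per CSR unit.
 */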
11602 if (ctxt == HFI1_CTRL_CTXT)
11603 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11605 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11606 write_csr(dd, RCV_VL15, 0);
11608 * When a receive context is being disabled, turn on the tail
11609 * update with a dummy tail address, and then disable
11610 * the receive context.
11612 if (dd->rcvhdrtail_dummy_dma) {
11613 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11614 dd->rcvhdrtail_dummy_dma);
11615 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11616 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11619 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11621 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11622 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11623 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11624 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11625 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
11626 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11627 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11628 /* See comment on RcvCtxtCtrl.TailUpd above */
11629 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11630 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11632 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11633 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11634 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11635 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11636 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11638 * In one-packet-per-eager mode, the size comes from
11639 * the RcvArray entry.
11641 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11642 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11644 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11645 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11646 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11647 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11648 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11649 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11650 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11651 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11652 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11653 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11654 rcd->rcvctrl = rcvctrl;
11655 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11656 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11658 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11660 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11661 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11663 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11665 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11666 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11667 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11668 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11669 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11670 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11671 ctxt, reg, reg == 0 ? "not" : "still");
11677 * The interrupt timeout and count must be set after
11678 * the context is enabled to take effect.
11680 /* set interrupt timeout */
11681 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11682 (u64)rcd->rcvavail_timeout <<
11683 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11685 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11686 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11687 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11690 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11692 * If the context has been disabled and the Tail Update has
11693 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy
11694 * address so it does not contain an invalid address.
11696 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11697 dd->rcvhdrtail_dummy_dma);
11700 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11706 ret = dd->cntrnameslen;
11707 *namep = dd->cntrnames;
11709 const struct cntr_entry *entry;
11712 ret = (dd->ndevcntrs) * sizeof(u64);
11714 /* Get the start of the block of counters */
11715 *cntrp = dd->cntrs;
11718 * Now go and fill in each counter in the block.
11720 for (i = 0; i < DEV_CNTR_LAST; i++) {
11721 entry = &dev_cntrs[i];
11722 hfi1_cdbg(CNTR, "reading %s", entry->name);
11723 if (entry->flags & CNTR_DISABLED) {
11725 hfi1_cdbg(CNTR, "\tDisabled\n");
11727 if (entry->flags & CNTR_VL) {
11728 hfi1_cdbg(CNTR, "\tPer VL\n");
11729 for (j = 0; j < C_VL_COUNT; j++) {
11730 val = entry->rw_cntr(entry,
11736 "\t\tRead 0x%llx for %d\n",
11738 dd->cntrs[entry->offset + j] =
11741 } else if (entry->flags & CNTR_SDMA) {
11743 "\t Per SDMA Engine\n");
11744 for (j = 0; j < dd->chip_sdma_engines;
11747 entry->rw_cntr(entry, dd, j,
11750 "\t\tRead 0x%llx for %d\n",
11752 dd->cntrs[entry->offset + j] =
11756 val = entry->rw_cntr(entry, dd,
11759 dd->cntrs[entry->offset] = val;
11760 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11769 * Used by sysfs to create files for hfi stats to read
11771 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
11777 ret = ppd->dd->portcntrnameslen;
11778 *namep = ppd->dd->portcntrnames;
11780 const struct cntr_entry *entry;
11783 ret = ppd->dd->nportcntrs * sizeof(u64);
11784 *cntrp = ppd->cntrs;
11786 for (i = 0; i < PORT_CNTR_LAST; i++) {
11787 entry = &port_cntrs[i];
11788 hfi1_cdbg(CNTR, "reading %s", entry->name);
11789 if (entry->flags & CNTR_DISABLED) {
11791 hfi1_cdbg(CNTR, "\tDisabled\n");
11795 if (entry->flags & CNTR_VL) {
11796 hfi1_cdbg(CNTR, "\tPer VL");
11797 for (j = 0; j < C_VL_COUNT; j++) {
11798 val = entry->rw_cntr(entry, ppd, j,
11803 "\t\tRead 0x%llx for %d",
11805 ppd->cntrs[entry->offset + j] = val;
11808 val = entry->rw_cntr(entry, ppd,
11812 ppd->cntrs[entry->offset] = val;
11813 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11820 static void free_cntrs(struct hfi1_devdata *dd)
11822 struct hfi1_pportdata *ppd;
11825 if (dd->synth_stats_timer.data)
11826 del_timer_sync(&dd->synth_stats_timer);
11827 dd->synth_stats_timer.data = 0;
11828 ppd = (struct hfi1_pportdata *)(dd + 1);
11829 for (i = 0; i < dd->num_pports; i++, ppd++) {
11831 kfree(ppd->scntrs);
11832 free_percpu(ppd->ibport_data.rvp.rc_acks);
11833 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11834 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11836 ppd->scntrs = NULL;
11837 ppd->ibport_data.rvp.rc_acks = NULL;
11838 ppd->ibport_data.rvp.rc_qacks = NULL;
11839 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11841 kfree(dd->portcntrnames);
11842 dd->portcntrnames = NULL;
11847 kfree(dd->cntrnames);
11848 dd->cntrnames = NULL;
11851 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11852 u64 *psval, void *context, int vl)
11857 if (entry->flags & CNTR_DISABLED) {
11858 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11862 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11864 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11866 /* If it's a synthetic counter there is more work we need to do */
11867 if (entry->flags & CNTR_SYNTH) {
11868 if (sval == CNTR_MAX) {
11869 /* No need to read an already saturated counter */
11873 if (entry->flags & CNTR_32BIT) {
11874 /* 32bit counters can wrap multiple times */
11875 u64 upper = sval >> 32;
11876 u64 lower = (sval << 32) >> 32;
11878 if (lower > val) { /* hw wrapped */
11879 if (upper == CNTR_32BIT_MAX)
11885 if (val != CNTR_MAX)
11886 val = (upper << 32) | val;
11889 /* If we rolled we are saturated */
11890 if ((val < sval) || (val > CNTR_MAX))
11897 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
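/*
 * Worked example for the 32-bit wrap handling above (numbers are
 * illustrative only): if the saved value is sval = 0x1FFFFFFF0
 * (upper = 0x1, lower = 0xFFFFFFF0) and the hardware now reads
 * val = 0x10, then lower > val, so the counter wrapped; upper is
 * bumped to 0x2 and the recovered 64-bit value is
 * (0x2 << 32) | 0x10 = 0x200000010.
 */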
11902 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11903 struct cntr_entry *entry,
11904 u64 *psval, void *context, int vl, u64 data)
11908 if (entry->flags & CNTR_DISABLED) {
11909 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11913 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11915 if (entry->flags & CNTR_SYNTH) {
11917 if (entry->flags & CNTR_32BIT) {
11918 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11919 (data << 32) >> 32);
11920 val = data; /* return the full 64bit value */
11922 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11926 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11931 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11936 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11938 struct cntr_entry *entry;
11941 entry = &dev_cntrs[index];
11942 sval = dd->scntrs + entry->offset;
11944 if (vl != CNTR_INVALID_VL)
11947 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11950 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11952 struct cntr_entry *entry;
11955 entry = &dev_cntrs[index];
11956 sval = dd->scntrs + entry->offset;
11958 if (vl != CNTR_INVALID_VL)
11961 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11964 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11966 struct cntr_entry *entry;
11969 entry = &port_cntrs[index];
11970 sval = ppd->scntrs + entry->offset;
11972 if (vl != CNTR_INVALID_VL)
11975 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11976 (index <= C_RCV_HDR_OVF_LAST)) {
11977 /* We do not want to bother for disabled contexts */
11981 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11984 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11986 struct cntr_entry *entry;
11989 entry = &port_cntrs[index];
11990 sval = ppd->scntrs + entry->offset;
11992 if (vl != CNTR_INVALID_VL)
11995 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11996 (index <= C_RCV_HDR_OVF_LAST)) {
11997 /* We do not want to bother for disabled contexts */
12001 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12004 static void update_synth_timer(unsigned long opaque)
12011 struct hfi1_pportdata *ppd;
12012 struct cntr_entry *entry;
12014 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12017 * Rather than keep beating on the CSRs, pick a minimal set that we
12018 * can check to watch for potential rollover. We can do this by
12019 * looking at the number of flits sent/received. If the total flits
12020 * exceeds 32 bits then we have to iterate all the counters and update.
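/*
 * Concretely: only the two flit counters are read on each timer
 * tick. If either 64-bit snapshot went backwards, or the combined
 * delta since the last tick reaches 2^32 flits, some 32-bit counter
 * may have wrapped unnoticed, so every counter is read (folding any
 * wrap into its 64-bit software value) before a second wrap can
 * occur.
 */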
12022 entry = &dev_cntrs[C_DC_RCV_FLITS];
12023 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12025 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12026 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12030 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12031 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12033 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12035 * May not be strictly necessary to update but it won't hurt and
12036 * simplifies the logic here.
12039 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12042 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12044 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12045 total_flits, (u64)CNTR_32BIT_MAX);
12046 if (total_flits >= CNTR_32BIT_MAX) {
12047 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12054 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12055 for (i = 0; i < DEV_CNTR_LAST; i++) {
12056 entry = &dev_cntrs[i];
12057 if (entry->flags & CNTR_VL) {
12058 for (vl = 0; vl < C_VL_COUNT; vl++)
12059 read_dev_cntr(dd, i, vl);
12061 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12064 ppd = (struct hfi1_pportdata *)(dd + 1);
12065 for (i = 0; i < dd->num_pports; i++, ppd++) {
12066 for (j = 0; j < PORT_CNTR_LAST; j++) {
12067 entry = &port_cntrs[j];
12068 if (entry->flags & CNTR_VL) {
12069 for (vl = 0; vl < C_VL_COUNT; vl++)
12070 read_port_cntr(ppd, j, vl);
12072 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12078 * We want the value in the register. The goal is to keep track
12079 * of the number of "ticks", not the counter value. In other
12080 * words, if the register rolls we want to notice it and go ahead
12081 * and force an update.
12083 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12084 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12087 entry = &dev_cntrs[C_DC_RCV_FLITS];
12088 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12091 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12092 dd->unit, dd->last_tx, dd->last_rx);
12095 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12098 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12101 #define C_MAX_NAME 16 /* 15 chars + one for \0 */
12102 static int init_cntrs(struct hfi1_devdata *dd)
12104 int i, rcv_ctxts, j;
12107 char name[C_MAX_NAME];
12108 struct hfi1_pportdata *ppd;
12109 const char *bit_type_32 = ",32";
12110 const int bit_type_32_sz = strlen(bit_type_32);
12112 /* set up the stats timer; the add_timer is done at the end */
12113 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12114 (unsigned long)dd);
12116 /***********************/
12117 /* per device counters */
12118 /***********************/
12120 /* size names and determine how many we have */
12124 for (i = 0; i < DEV_CNTR_LAST; i++) {
12125 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12126 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12130 if (dev_cntrs[i].flags & CNTR_VL) {
12131 dev_cntrs[i].offset = dd->ndevcntrs;
12132 for (j = 0; j < C_VL_COUNT; j++) {
12133 snprintf(name, C_MAX_NAME, "%s%d",
12134 dev_cntrs[i].name, vl_from_idx(j));
12135 sz += strlen(name);
12136 /* Add ",32" for 32-bit counters */
12137 if (dev_cntrs[i].flags & CNTR_32BIT)
12138 sz += bit_type_32_sz;
12142 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12143 dev_cntrs[i].offset = dd->ndevcntrs;
12144 for (j = 0; j < dd->chip_sdma_engines; j++) {
12145 snprintf(name, C_MAX_NAME, "%s%d",
12146 dev_cntrs[i].name, j);
12147 sz += strlen(name);
12148 /* Add ",32" for 32-bit counters */
12149 if (dev_cntrs[i].flags & CNTR_32BIT)
12150 sz += bit_type_32_sz;
12155 /* +1 for newline. */
12156 sz += strlen(dev_cntrs[i].name) + 1;
12157 /* Add ",32" for 32-bit counters */
12158 if (dev_cntrs[i].flags & CNTR_32BIT)
12159 sz += bit_type_32_sz;
12160 dev_cntrs[i].offset = dd->ndevcntrs;
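/*
 * Resulting name-blob layout (counter names are hypothetical): a
 * per-VL 32-bit counter "RxWords" expands to "RxWords0,32\n",
 * "RxWords1,32\n", ... (one entry per VL), while a plain 64-bit
 * counter "TxPkts" contributes just "TxPkts\n". The sizing above
 * reserves one u64 value slot and one name per expanded entry.
 */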
12165 /* allocate space for the counter values */
12166 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12170 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12174 /* allocate space for the counter names */
12175 dd->cntrnameslen = sz;
12176 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12177 if (!dd->cntrnames)
12180 /* fill in the names */
12181 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12182 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12184 } else if (dev_cntrs[i].flags & CNTR_VL) {
12185 for (j = 0; j < C_VL_COUNT; j++) {
12186 snprintf(name, C_MAX_NAME, "%s%d",
12189 memcpy(p, name, strlen(name));
12192 /* Counter is 32 bits */
12193 if (dev_cntrs[i].flags & CNTR_32BIT) {
12194 memcpy(p, bit_type_32, bit_type_32_sz);
12195 p += bit_type_32_sz;
12200 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12201 for (j = 0; j < dd->chip_sdma_engines; j++) {
12202 snprintf(name, C_MAX_NAME, "%s%d",
12203 dev_cntrs[i].name, j);
12204 memcpy(p, name, strlen(name));
12207 /* Counter is 32 bits */
12208 if (dev_cntrs[i].flags & CNTR_32BIT) {
12209 memcpy(p, bit_type_32, bit_type_32_sz);
12210 p += bit_type_32_sz;
12216 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12217 p += strlen(dev_cntrs[i].name);
12219 /* Counter is 32 bits */
12220 if (dev_cntrs[i].flags & CNTR_32BIT) {
12221 memcpy(p, bit_type_32, bit_type_32_sz);
12222 p += bit_type_32_sz;
12229 /*********************/
12230 /* per port counters */
12231 /*********************/
12234 * Go through the counters for the overflows and disable the ones we
12235 * don't need. This varies based on platform so we need to do it
12236 * dynamically here.
12238 rcv_ctxts = dd->num_rcv_contexts;
12239 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12240 i <= C_RCV_HDR_OVF_LAST; i++) {
12241 port_cntrs[i].flags |= CNTR_DISABLED;
12244 /* size port counter names and determine how many we have */
12246 dd->nportcntrs = 0;
12247 for (i = 0; i < PORT_CNTR_LAST; i++) {
12248 if (port_cntrs[i].flags & CNTR_DISABLED) {
12249 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12253 if (port_cntrs[i].flags & CNTR_VL) {
12254 port_cntrs[i].offset = dd->nportcntrs;
12255 for (j = 0; j < C_VL_COUNT; j++) {
12256 snprintf(name, C_MAX_NAME, "%s%d",
12257 port_cntrs[i].name, vl_from_idx(j));
12258 sz += strlen(name);
12259 /* Add ",32" for 32-bit counters */
12260 if (port_cntrs[i].flags & CNTR_32BIT)
12261 sz += bit_type_32_sz;
12266 /* +1 for newline */
12267 sz += strlen(port_cntrs[i].name) + 1;
12268 /* Add ",32" for 32-bit counters */
12269 if (port_cntrs[i].flags & CNTR_32BIT)
12270 sz += bit_type_32_sz;
12271 port_cntrs[i].offset = dd->nportcntrs;
12276 /* allocate space for the counter names */
12277 dd->portcntrnameslen = sz;
12278 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12279 if (!dd->portcntrnames)
12282 /* fill in port cntr names */
12283 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12284 if (port_cntrs[i].flags & CNTR_DISABLED)
12287 if (port_cntrs[i].flags & CNTR_VL) {
12288 for (j = 0; j < C_VL_COUNT; j++) {
12289 snprintf(name, C_MAX_NAME, "%s%d",
12290 port_cntrs[i].name, vl_from_idx(j));
12291 memcpy(p, name, strlen(name));
12294 /* Counter is 32 bits */
12295 if (port_cntrs[i].flags & CNTR_32BIT) {
12296 memcpy(p, bit_type_32, bit_type_32_sz);
12297 p += bit_type_32_sz;
12303 memcpy(p, port_cntrs[i].name,
12304 strlen(port_cntrs[i].name));
12305 p += strlen(port_cntrs[i].name);
12307 /* Counter is 32 bits */
12308 if (port_cntrs[i].flags & CNTR_32BIT) {
12309 memcpy(p, bit_type_32, bit_type_32_sz);
12310 p += bit_type_32_sz;
12317 /* allocate per port storage for counter values */
12318 ppd = (struct hfi1_pportdata *)(dd + 1);
12319 for (i = 0; i < dd->num_pports; i++, ppd++) {
12320 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12324 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12329 /* CPU counters need to be allocated and zeroed */
12330 if (init_cpu_counters(dd))
12333 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12340 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12342 switch (chip_lstate) {
12345 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12349 return IB_PORT_DOWN;
12351 return IB_PORT_INIT;
12353 return IB_PORT_ARMED;
12354 case LSTATE_ACTIVE:
12355 return IB_PORT_ACTIVE;
12359 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12361 /* look at the HFI meta-states only */
12362 switch (chip_pstate & 0xf0) {
12364 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12368 return IB_PORTPHYSSTATE_DISABLED;
12370 return OPA_PORTPHYSSTATE_OFFLINE;
12372 return IB_PORTPHYSSTATE_POLLING;
12373 case PLS_CONFIGPHY:
12374 return IB_PORTPHYSSTATE_TRAINING;
12376 return IB_PORTPHYSSTATE_LINKUP;
12378 return IB_PORTPHYSSTATE_PHY_TEST;
12382 /* return the OPA port logical state name */
12383 const char *opa_lstate_name(u32 lstate)
12385 static const char * const port_logical_names[] = {
12391 "PORT_ACTIVE_DEFER",
12393 if (lstate < ARRAY_SIZE(port_logical_names))
12394 return port_logical_names[lstate];
12398 /* return the OPA port physical state name */
12399 const char *opa_pstate_name(u32 pstate)
12401 static const char * const port_physical_names[] = {
12408 "PHYS_LINK_ERR_RECOVER",
12415 if (pstate < ARRAY_SIZE(port_physical_names))
12416 return port_physical_names[pstate];
12421 * Read the hardware link state and set the driver's cached value of it.
12422 * Return the (new) current value.
12424 u32 get_logical_state(struct hfi1_pportdata *ppd)
12428 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12429 if (new_state != ppd->lstate) {
12430 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12431 opa_lstate_name(new_state), new_state);
12432 ppd->lstate = new_state;
12435 * Set port status flags in the page mapped into userspace
12436 * memory. Do it here to ensure a reliable state - this is
12437 * the only function called by all state handling code.
12438 * Always set the flags because the cached value
12439 * might have been changed explicitly outside of this
12440 * function.
12442 if (ppd->statusp) {
12443 switch (ppd->lstate) {
12446 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12447 HFI1_STATUS_IB_READY);
12449 case IB_PORT_ARMED:
12450 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12452 case IB_PORT_ACTIVE:
12453 *ppd->statusp |= HFI1_STATUS_IB_READY;
12457 return ppd->lstate;
12461 * wait_logical_linkstate - wait for an IB link state change to occur
12462 * @ppd: port device
12463 * @state: the state to wait for
12464 * @msecs: the number of milliseconds to wait
12466 * Wait up to msecs milliseconds for IB link state change to occur.
12467 * For now, take the easy polling route.
12468 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12470 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12473 unsigned long timeout;
12475 timeout = jiffies + msecs_to_jiffies(msecs);
12477 if (get_logical_state(ppd) == state)
12479 if (time_after(jiffies, timeout))
12483 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12488 u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12493 pstate = read_physical_state(ppd->dd);
12494 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12495 if (ppd->last_pstate != ib_pstate) {
12496 dd_dev_info(ppd->dd,
12497 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12498 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12500 ppd->last_pstate = ib_pstate;
12505 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12506 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12508 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12509 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12511 int hfi1_init_ctxt(struct send_context *sc)
12514 struct hfi1_devdata *dd = sc->dd;
12516 u8 set = (sc->type == SC_USER ?
12517 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12518 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12519 reg = read_kctxt_csr(dd, sc->hw_context,
12520 SEND_CTXT_CHECK_ENABLE);
12522 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12524 SET_STATIC_RATE_CONTROL_SMASK(reg);
12525 write_kctxt_csr(dd, sc->hw_context,
12526 SEND_CTXT_CHECK_ENABLE, reg);
12531 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12536 if (dd->icode != ICODE_RTL_SILICON) {
12537 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12538 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12542 reg = read_csr(dd, ASIC_STS_THERM);
12543 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12544 ASIC_STS_THERM_CURR_TEMP_MASK);
12545 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12546 ASIC_STS_THERM_LO_TEMP_MASK);
12547 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12548 ASIC_STS_THERM_HI_TEMP_MASK);
12549 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12550 ASIC_STS_THERM_CRIT_TEMP_MASK);
12551 /* triggers is a 3-bit value - 1 bit per trigger. */
12552 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12557 /* ========================================================================= */
12560 * Enable/disable chip from delivering interrupts.
12562 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12567 * In HFI, the mask needs to be 1 to allow interrupts.
12570 /* enable all interrupts */
12571 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12572 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12576 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12577 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12582 * Clear all interrupt sources on the chip.
12584 static void clear_all_interrupts(struct hfi1_devdata *dd)
12588 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12589 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12591 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12592 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12593 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12594 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12595 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12596 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12597 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12598 for (i = 0; i < dd->chip_send_contexts; i++)
12599 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12600 for (i = 0; i < dd->chip_sdma_engines; i++)
12601 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12603 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12604 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12605 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12608 /* Move to pcie.c? */
12609 static void disable_intx(struct pci_dev *pdev)
12614 static void clean_up_interrupts(struct hfi1_devdata *dd)
12618 /* remove irqs - must happen before disabling/turning off */
12619 if (dd->num_msix_entries) {
12621 struct hfi1_msix_entry *me = dd->msix_entries;
12623 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12624 if (!me->arg) /* => no irq, no affinity */
12626 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
12627 free_irq(me->msix.vector, me->arg);
12631 if (dd->requested_intx_irq) {
12632 free_irq(dd->pcidev->irq, dd);
12633 dd->requested_intx_irq = 0;
12637 /* turn off interrupts */
12638 if (dd->num_msix_entries) {
12640 pci_disable_msix(dd->pcidev);
12643 disable_intx(dd->pcidev);
12646 /* clean structures */
12647 kfree(dd->msix_entries);
12648 dd->msix_entries = NULL;
12649 dd->num_msix_entries = 0;
12653 * Remap the interrupt source from the general handler to the given MSI-X
12656 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12661 /* clear from the handled mask of the general interrupt */
12664 dd->gi_mask[m] &= ~((u64)1 << n);
12666 /* direct the chip source to the given MSI-X interrupt */
12669 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12670 reg &= ~((u64)0xff << (8 * n));
12671 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12672 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
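/*
 * Illustrative example: each 64-bit CCE_INT_MAP CSR holds eight
 * one-byte entries, so chip source 13 is byte 13 % 8 = 5 of
 * CCE_INT_MAP + 8 * (13 / 8), while its bit in the general
 * handler's mask is bit 13 % 64 = 13 of gi_mask[13 / 64].
 */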
12675 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12676 int engine, int msix_intr)
12679 * SDMA engine interrupt sources are grouped by type, rather
12680 * than by engine. Per-engine interrupts are as follows:
12681 *	SDMA
12682 *	SDMAProgress
12683 *	SDMAIdle
12685 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
12687 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
12689 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
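/*
 * Example for the three calls above (engine number illustrative):
 * engine 2 remaps sources IS_SDMA_START + 2,
 * IS_SDMA_START + TXE_NUM_SDMA_ENGINES + 2 and
 * IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + 2 - the same engine
 * offset within each type-grouped block.
 */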
12693 static int request_intx_irq(struct hfi1_devdata *dd)
12697 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12699 ret = request_irq(dd->pcidev->irq, general_interrupt,
12700 IRQF_SHARED, dd->intx_name, dd);
12702 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12705 dd->requested_intx_irq = 1;
12709 static int request_msix_irqs(struct hfi1_devdata *dd)
12711 int first_general, last_general;
12712 int first_sdma, last_sdma;
12713 int first_rx, last_rx;
12716 /* calculate the ranges we are going to use */
12718 last_general = first_general + 1;
12719 first_sdma = last_general;
12720 last_sdma = first_sdma + dd->num_sdma;
12721 first_rx = last_sdma;
12722 last_rx = first_rx + dd->n_krcv_queues;
12725 * Sanity check - the code expects all SDMA chip source
12726 * interrupts to be in the same CSR, starting at bit 0. Verify
12727 * that this is true by checking the bit location of the start.
12729 BUILD_BUG_ON(IS_SDMA_START % 64);
12731 for (i = 0; i < dd->num_msix_entries; i++) {
12732 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12733 const char *err_info;
12734 irq_handler_t handler;
12735 irq_handler_t thread = NULL;
12738 struct hfi1_ctxtdata *rcd = NULL;
12739 struct sdma_engine *sde = NULL;
12741 /* obtain the arguments to request_irq */
12742 if (first_general <= i && i < last_general) {
12743 idx = i - first_general;
12744 handler = general_interrupt;
12746 snprintf(me->name, sizeof(me->name),
12747 DRIVER_NAME "_%d", dd->unit);
12748 err_info = "general";
12749 me->type = IRQ_GENERAL;
12750 } else if (first_sdma <= i && i < last_sdma) {
12751 idx = i - first_sdma;
12752 sde = &dd->per_sdma[idx];
12753 handler = sdma_interrupt;
12755 snprintf(me->name, sizeof(me->name),
12756 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12758 remap_sdma_interrupts(dd, idx, i);
12759 me->type = IRQ_SDMA;
12760 } else if (first_rx <= i && i < last_rx) {
12761 idx = i - first_rx;
12762 rcd = dd->rcd[idx];
12763 /* no interrupt if no rcd */
12767 * Set the interrupt register and mask for this
12768 * context's interrupt.
12770 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12771 rcd->imask = ((u64)1) <<
12772 ((IS_RCVAVAIL_START + idx) % 64);
12773 handler = receive_context_interrupt;
12774 thread = receive_context_thread;
12776 snprintf(me->name, sizeof(me->name),
12777 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12778 err_info = "receive context";
12779 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12780 me->type = IRQ_RCVCTXT;
12782 /* not in our expected range - complain, then ignore it */
12786 "Unexpected extra MSI-X interrupt %d\n", i);
12789 /* no argument, no interrupt */
12792 /* make sure the name is terminated */
12793 me->name[sizeof(me->name) - 1] = 0;
12795 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12799 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12800 err_info, me->msix.vector, idx, ret);
12804 * assign arg after the request_irq call, so that cleanup
12805 * only frees IRQs that were successfully requested.
12809 ret = hfi1_get_irq_affinity(dd, me);
12812 "unable to pin IRQ %d\n", ret);
12819 * Set the general handler to accept all interrupts, remap all
12820 * chip interrupts back to MSI-X 0.
12822 static void reset_interrupts(struct hfi1_devdata *dd)
12826 /* all interrupts handled by the general handler */
12827 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12828 dd->gi_mask[i] = ~(u64)0;
12830 /* all chip interrupts map to MSI-X 0 */
12831 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12832 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12835 static int set_up_interrupts(struct hfi1_devdata *dd)
12837 struct hfi1_msix_entry *entries;
12838 u32 total, request;
12840 int single_interrupt = 0; /* we expect to have all the interrupts */
12844 * 1 general, "slow path" interrupt (includes the SDMA engines
12845 * slow source, SDMACleanupDone)
12846 * N interrupts - one per used SDMA engine
12847 * M interrupts - one per kernel receive context
12849 total = 1 + dd->num_sdma + dd->n_krcv_queues;
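/*
 * Illustrative vector count (hypothetical configuration): 16 SDMA
 * engines in use and 9 kernel receive contexts give
 * total = 1 + 16 + 9 = 26 MSI-X vectors.
 */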
12851 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12856 /* 1-1 MSI-X entry assignment */
12857 for (i = 0; i < total; i++)
12858 entries[i].msix.entry = i;
12860 /* ask for MSI-X interrupts */
12862 request_msix(dd, &request, entries);
12864 if (request == 0) {
12866 /* dd->num_msix_entries already zero */
12868 single_interrupt = 1;
12869 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12872 dd->num_msix_entries = request;
12873 dd->msix_entries = entries;
12875 if (request != total) {
12876 /* using MSI-X, with reduced interrupts */
12879 "cannot handle reduced interrupt case, want %u, got %u\n",
12884 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12887 /* mask all interrupts */
12888 set_intr_state(dd, 0);
12889 /* clear all pending interrupts */
12890 clear_all_interrupts(dd);
12892 /* reset general handler mask, chip MSI-X mappings */
12893 reset_interrupts(dd);
12895 if (single_interrupt)
12896 ret = request_intx_irq(dd);
12898 ret = request_msix_irqs(dd);
12905 clean_up_interrupts(dd);
12910 * Set up context values in dd. Sets:
12912 * num_rcv_contexts - number of contexts being used
12913 * n_krcv_queues - number of kernel contexts
12914 * first_user_ctxt - first non-kernel context in array of contexts
12915 * freectxts - number of free user contexts
12916 * num_send_contexts - number of PIO send contexts being used
12918 static int set_up_context_variables(struct hfi1_devdata *dd)
12920 unsigned long num_kernel_contexts;
12921 int total_contexts;
12925 int user_rmt_reduced;
12928 * Kernel receive contexts:
12929 * - Context 0 - control context (VL15/multicast/error)
12930 * - Context 1 - first kernel context
12931 * - Context 2 - second kernel context
12936 * n_krcvqs is the sum of module parameter kernel receive
12937 * contexts, krcvqs[]. It does not include the control
12938 * context, so add that.
12940 num_kernel_contexts = n_krcvqs + 1;
12942 num_kernel_contexts = DEFAULT_KRCVQS + 1;
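/*
 * Example (hypothetical settings): krcvqs[] summing to 8 kernel
 * receive queues gives num_kernel_contexts = 8 + 1 = 9, the "+ 1"
 * being the control context listed above.
 */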
12944 * Every kernel receive context needs an ACK send context.
12945 * One send context is allocated for each VL{0-7} and VL15.
12947 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12949 "Reducing # kernel rcv contexts to: %d, from %lu\n",
12950 (int)(dd->chip_send_contexts - num_vls - 1),
12951 num_kernel_contexts);
12952 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12956 * - default to 1 user context per real (non-HT) CPU core if
12957 * num_user_contexts is negative
12959 if (num_user_contexts < 0)
12960 num_user_contexts =
12961 cpumask_weight(&node_affinity.real_cpu_mask);
12963 total_contexts = num_kernel_contexts + num_user_contexts;
12966 * Adjust the counts given a global max.
12968 if (total_contexts > dd->chip_rcv_contexts) {
12970 "Reducing # user receive contexts to: %d, from %d\n",
12971 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12972 (int)num_user_contexts);
12973 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12975 total_contexts = num_kernel_contexts + num_user_contexts;
12978 /* each user context requires an entry in the RMT */
12979 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12980 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12981 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12983 "RMT size is reducing the number of user receive contexts from %d to %d\n",
12984 (int)num_user_contexts,
12987 num_user_contexts = user_rmt_reduced;
12988 total_contexts = num_kernel_contexts + num_user_contexts;
12991 /* the first N are kernel contexts, the rest are user contexts */
12992 dd->num_rcv_contexts = total_contexts;
12993 dd->n_krcv_queues = num_kernel_contexts;
12994 dd->first_user_ctxt = num_kernel_contexts;
12995 dd->num_user_contexts = num_user_contexts;
12996 dd->freectxts = num_user_contexts;
12998 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12999 (int)dd->chip_rcv_contexts,
13000 (int)dd->num_rcv_contexts,
13001 (int)dd->n_krcv_queues,
13002 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
13005 * Receive array allocation:
13006 * All RcvArray entries are divided into groups of 8. This
13007 * is required by the hardware and will speed up writes to
13008 * consecutive entries by using write-combining of the entire
13009 * cacheline.
13011 * The number of groups is evenly divided among all contexts;
13012 * any leftover groups are given to the first N user contexts.
13015 dd->rcv_entries.group_size = RCV_INCREMENT;
13016 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13017 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13018 dd->rcv_entries.nctxt_extra = ngroups -
13019 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13020 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13021 dd->rcv_entries.ngroups,
13022 dd->rcv_entries.nctxt_extra);
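/*
 * Worked example (hypothetical sizes): a 32768-entry RcvArray in
 * groups of 8 gives ngroups = 4096; with 40 receive contexts each
 * context gets 4096 / 40 = 102 groups, and the remaining
 * nctxt_extra = 4096 - 40 * 102 = 16 groups go to the first user
 * contexts, as described above.
 */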
13023 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13024 MAX_EAGER_ENTRIES * 2) {
13025 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13026 dd->rcv_entries.group_size;
13028 "RcvArray group count too high, change to %u\n",
13029 dd->rcv_entries.ngroups);
13030 dd->rcv_entries.nctxt_extra = 0;
13033 * PIO send contexts
13035 ret = init_sc_pools_and_sizes(dd);
13036 if (ret >= 0) { /* success */
13037 dd->num_send_contexts = ret;
13040 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13041 dd->chip_send_contexts,
13042 dd->num_send_contexts,
13043 dd->sc_sizes[SC_KERNEL].count,
13044 dd->sc_sizes[SC_ACK].count,
13045 dd->sc_sizes[SC_USER].count,
13046 dd->sc_sizes[SC_VL15].count);
13047 ret = 0; /* success */
13054 * Set the device/port partition key table. The MAD code
13055 * will ensure that, at least, the partial management
13056 * partition key is present in the table.
13058 static void set_partition_keys(struct hfi1_pportdata *ppd)
13060 struct hfi1_devdata *dd = ppd->dd;
13064 dd_dev_info(dd, "Setting partition keys\n");
13065 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13066 reg |= (ppd->pkeys[i] &
13067 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13069 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13070 /* Each register holds 4 PKey values. */
13071 if ((i % 4) == 3) {
13072 write_csr(dd, RCV_PARTITION_KEY +
13073 ((i - 3) * 2), reg);
13078 /* Always enable HW pkeys check when pkeys table is set */
13079 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
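/*
 * Packing sketch for the loop above (assuming 16-bit pkeys, four
 * per 64-bit CSR as the comment notes): pkeys[0..3] occupy bits
 * 15:0, 31:16, 47:32 and 63:48 of the first RCV_PARTITION_KEY CSR,
 * written when i == 3 at offset (3 - 3) * 2 = 0; pkeys[4..7] are
 * written when i == 7 at offset (7 - 3) * 2 = 8, one CSR further on.
 */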
13083 * These CSRs and memories are uninitialized on reset and must be
13084 * written before reading to set the ECC/parity bits.
13086 * NOTE: All user context CSRs that are not mmapped write-only
13087 * (e.g. the TID flows) must be initialized even if the driver never
13088 * reads them.
13090 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13095 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13096 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13098 /* SendCtxtCreditReturnAddr */
13099 for (i = 0; i < dd->chip_send_contexts; i++)
13100 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13102 /* PIO Send buffers */
13103 /* SDMA Send buffers */
13105 * These are not normally read, and (presently) have no method
13106 * to be read, so are not pre-initialized
13110 /* RcvHdrTailAddr */
13111 /* RcvTidFlowTable */
13112 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13113 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13114 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13115 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13116 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13120 for (i = 0; i < dd->chip_rcv_array_count; i++)
13121 write_csr(dd, RCV_ARRAY + (8 * i),
13122 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
13124 /* RcvQPMapTable */
13125 for (i = 0; i < 32; i++)
13126 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13130 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13132 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13135 unsigned long timeout;
13138 /* is the condition present? */
13139 reg = read_csr(dd, CCE_STATUS);
13140 if ((reg & status_bits) == 0)
13143 /* clear the condition */
13144 write_csr(dd, CCE_CTRL, ctrl_bits);
13146 /* wait for the condition to clear */
13147 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13149 reg = read_csr(dd, CCE_STATUS);
13150 if ((reg & status_bits) == 0)
13152 if (time_after(jiffies, timeout)) {
13154 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13155 status_bits, reg & status_bits);
13162 /* set CCE CSRs to chip reset defaults */
13163 static void reset_cce_csrs(struct hfi1_devdata *dd)
13167 /* CCE_REVISION read-only */
13168 /* CCE_REVISION2 read-only */
13169 /* CCE_CTRL - bits clear automatically */
13170 /* CCE_STATUS read-only, use CceCtrl to clear */
13171 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13172 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13173 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13174 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13175 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13176 /* CCE_ERR_STATUS read-only */
13177 write_csr(dd, CCE_ERR_MASK, 0);
13178 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13179 /* CCE_ERR_FORCE leave alone */
13180 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13181 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13182 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13183 /* CCE_PCIE_CTRL leave alone */
13184 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13185 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13186 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13187 CCE_MSIX_TABLE_UPPER_RESETCSR);
13189 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13190 /* CCE_MSIX_PBA read-only */
13191 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13192 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13194 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13195 write_csr(dd, CCE_INT_MAP, 0);
13196 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13197 /* CCE_INT_STATUS read-only */
13198 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13199 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13200 /* CCE_INT_FORCE leave alone */
13201 /* CCE_INT_BLOCKED read-only */
13203 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13204 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13207 /* set MISC CSRs to chip reset defaults */
13208 static void reset_misc_csrs(struct hfi1_devdata *dd)
13212 for (i = 0; i < 32; i++) {
13213 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13214 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13215 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13218 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13219 * only be written in 128-byte chunks
13221 /* init RSA engine to clear lingering errors */
13222 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13223 write_csr(dd, MISC_CFG_RSA_MU, 0);
13224 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13225 /* MISC_STS_8051_DIGEST read-only */
13226 /* MISC_STS_SBM_DIGEST read-only */
13227 /* MISC_STS_PCIE_DIGEST read-only */
13228 /* MISC_STS_FAB_DIGEST read-only */
13229 /* MISC_ERR_STATUS read-only */
13230 write_csr(dd, MISC_ERR_MASK, 0);
13231 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13232 /* MISC_ERR_FORCE leave alone */
13235 /* set TXE CSRs to chip reset defaults */
13236 static void reset_txe_csrs(struct hfi1_devdata *dd)
13243 write_csr(dd, SEND_CTRL, 0);
13244 __cm_reset(dd, 0); /* reset CM internal state */
13245 /* SEND_CONTEXTS read-only */
13246 /* SEND_DMA_ENGINES read-only */
13247 /* SEND_PIO_MEM_SIZE read-only */
13248 /* SEND_DMA_MEM_SIZE read-only */
13249 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13250 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13251 /* SEND_PIO_ERR_STATUS read-only */
13252 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13253 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13254 /* SEND_PIO_ERR_FORCE leave alone */
13255 /* SEND_DMA_ERR_STATUS read-only */
13256 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13257 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13258 /* SEND_DMA_ERR_FORCE leave alone */
13259 /* SEND_EGRESS_ERR_STATUS read-only */
13260 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13261 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13262 /* SEND_EGRESS_ERR_FORCE leave alone */
13263 write_csr(dd, SEND_BTH_QP, 0);
13264 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13265 write_csr(dd, SEND_SC2VLT0, 0);
13266 write_csr(dd, SEND_SC2VLT1, 0);
13267 write_csr(dd, SEND_SC2VLT2, 0);
13268 write_csr(dd, SEND_SC2VLT3, 0);
13269 write_csr(dd, SEND_LEN_CHECK0, 0);
13270 write_csr(dd, SEND_LEN_CHECK1, 0);
13271 /* SEND_ERR_STATUS read-only */
13272 write_csr(dd, SEND_ERR_MASK, 0);
13273 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13274 /* SEND_ERR_FORCE read-only */
13275 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13276 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13277 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13278 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13279 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13280 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13281 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13282 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13283 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13284 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13285 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13286 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13287 /* SEND_CM_CREDIT_USED_STATUS read-only */
13288 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13289 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13290 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13291 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13292 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13293 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13294 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13295 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13296 /* SEND_CM_CREDIT_USED_VL read-only */
13297 /* SEND_CM_CREDIT_USED_VL15 read-only */
13298 /* SEND_EGRESS_CTXT_STATUS read-only */
13299 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13300 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13301 /* SEND_EGRESS_ERR_INFO read-only */
13302 /* SEND_EGRESS_ERR_SOURCE read-only */
13305 * TXE Per-Context CSRs
13307 for (i = 0; i < dd->chip_send_contexts; i++) {
13308 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13309 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13310 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13311 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13312 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13313 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13314 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13315 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13316 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13317 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13318 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13319 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13323 * TXE Per-SDMA CSRs
13325 for (i = 0; i < dd->chip_sdma_engines; i++) {
13326 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13327 /* SEND_DMA_STATUS read-only */
13328 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13329 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13330 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13331 /* SEND_DMA_HEAD read-only */
13332 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13333 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13334 /* SEND_DMA_IDLE_CNT read-only */
13335 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13336 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13337 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13338 /* SEND_DMA_ENG_ERR_STATUS read-only */
13339 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13340 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13341 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13342 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13343 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13344 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13345 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13346 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13347 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13348 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13354 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13356 static void init_rbufs(struct hfi1_devdata *dd)
13362 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress
13363 * are both zero.
13367 reg = read_csr(dd, RCV_STATUS);
13368 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13369 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13372 * Give up after 1ms - maximum wait time.
13374 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
13375 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13376 * 136 KB / (66% * 250MB/s) = 844us
13378 if (count++ > 500) {
13380 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13384 udelay(2); /* do not busy-wait the CSR */
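/* 500 iterations x 2 us/iteration matches the ~1 ms budget above */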
13387 /* start the init - expect RcvCtrl to be 0 */
13388 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13391 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13392 * period after the write before RcvStatus.RxRbufInitDone is valid.
13393 * The delay in the first run through the loop below is sufficient and
13394 * required before the first read of RcvStatus.RxRbufInitDone.
13396 read_csr(dd, RCV_CTRL);
13398 /* wait for the init to finish */
13401 /* delay is required first time through - see above */
13402 udelay(2); /* do not busy-wait the CSR */
13403 reg = read_csr(dd, RCV_STATUS);
13404 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13407 /* give up after 100us - slowest possible at 33MHz is 73us */
13408 if (count++ > 50) {
13410 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13417 /* set RXE CSRs to chip reset defaults */
13418 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13425 write_csr(dd, RCV_CTRL, 0);
13427 /* RCV_STATUS read-only */
13428 /* RCV_CONTEXTS read-only */
13429 /* RCV_ARRAY_CNT read-only */
13430 /* RCV_BUF_SIZE read-only */
13431 write_csr(dd, RCV_BTH_QP, 0);
13432 write_csr(dd, RCV_MULTICAST, 0);
13433 write_csr(dd, RCV_BYPASS, 0);
13434 write_csr(dd, RCV_VL15, 0);
13435 /* this is a clear-down */
13436 write_csr(dd, RCV_ERR_INFO,
13437 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13438 /* RCV_ERR_STATUS read-only */
13439 write_csr(dd, RCV_ERR_MASK, 0);
13440 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13441 /* RCV_ERR_FORCE leave alone */
13442 for (i = 0; i < 32; i++)
13443 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13444 for (i = 0; i < 4; i++)
13445 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13446 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13447 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13448 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13449 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13450 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13451 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13452 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13453 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13455 for (i = 0; i < 32; i++)
13456 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13459 * RXE Kernel and User Per-Context CSRs
13461 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13463 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13464 /* RCV_CTXT_STATUS read-only */
13465 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13466 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13467 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13468 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13469 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13470 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13471 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13472 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13473 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13474 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13477 /* RCV_HDR_TAIL read-only */
13478 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13479 /* RCV_EGR_INDEX_TAIL read-only */
13480 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13481 /* RCV_EGR_OFFSET_TAIL read-only */
13482 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13483 write_uctxt_csr(dd, i,
13484 RCV_TID_FLOW_TABLE + (8 * j), 0);
13490 * Set sc2vl tables.
13492 * They power on to zeros, so to avoid send context errors
13493 * they need to be set:
13495 * SC 0-7 -> VL 0-7 (respectively)
13496 * SC 15  -> VL 15
13497 * otherwise -> VL 0
13500 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13503 /* init per architecture spec, constrained by hardware capability */
13505 /* HFI maps sent packets */
13506 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13512 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13518 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13524 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13531 /* DC maps received packets */
13532 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13534 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13535 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13536 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13538 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13539 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13541 /* initialize the cached sc2vl values consistently with h/w */
13542 for (i = 0; i < 32; i++) {
13543 if (i < 8 || i == 15)
13544 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13546 *((u8 *)(dd->sc2vl) + i) = 0;
13551 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13552 * depend on the chip going through a power-on reset - a driver may be loaded
13553 * and unloaded many times.
13555 * Do not write any CSR values to the chip in this routine - there may be
13556 * a reset following the (possible) FLR in this routine.
13559 static void init_chip(struct hfi1_devdata *dd)
13564 * Put the HFI CSRs in a known state.
13565 * Combine this with a DC reset.
13567 * Stop the device from doing anything while we do a
13568 * reset. We know there are no other active users of
13569 * the device since we are now in charge. Turn off
13570 * all outbound and inbound traffic and make sure
13571 * the device does not generate any interrupts.
13574 /* disable send contexts and SDMA engines */
13575 write_csr(dd, SEND_CTRL, 0);
13576 for (i = 0; i < dd->chip_send_contexts; i++)
13577 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13578 for (i = 0; i < dd->chip_sdma_engines; i++)
13579 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13580 /* disable port (turn off RXE inbound traffic) and contexts */
13581 write_csr(dd, RCV_CTRL, 0);
13582 for (i = 0; i < dd->chip_rcv_contexts; i++)
13583 write_csr(dd, RCV_CTXT_CTRL, 0);
13584 /* mask all interrupt sources */
13585 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13586 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13589 * DC Reset: do a full DC reset before the register clear.
13590 * A recommended length of time to hold is one CSR read,
13591 * so reread the CceDcCtrl. Then, hold the DC in reset
13592 * across the clear.
13594 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13595 (void)read_csr(dd, CCE_DC_CTRL);
13599 * A FLR will reset the SPC core and part of the PCIe.
13600 * The parts that need to be restored have already been
13601 * saved.
13603 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13605 /* do the FLR, the DC reset will remain */
13608 /* restore command and BARs */
13609 restore_pci_variables(dd);
13612 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13614 restore_pci_variables(dd);
13617 dd_dev_info(dd, "Resetting CSRs with writes\n");
13618 reset_cce_csrs(dd);
13619 reset_txe_csrs(dd);
13620 reset_rxe_csrs(dd);
13621 reset_misc_csrs(dd);
13623 /* clear the DC reset */
13624 write_csr(dd, CCE_DC_CTRL, 0);
13626 /* Set the LED off */
13630 * Clear the QSFP reset.
13631 * An FLR enforces a 0 on all out pins. The driver does not touch
13632 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and holds
13633 * anything plugged in constantly in reset, if it pays attention
13634 * to RESET_N.
13635 * Prime examples of this are optical cables. Set all pins high.
13636 * I2CCLK and I2CDAT will change per direction, and INT_N and
13637 * MODPRS_N are input only and their value is ignored.
13639 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13640 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13641 init_chip_resources(dd);
13644 static void init_early_variables(struct hfi1_devdata *dd)
13648 /* assign link credit variables */
13650 dd->link_credits = CM_GLOBAL_CREDITS;
13652 dd->link_credits--;
13653 dd->vcu = cu_to_vcu(hfi1_cu);
13654 /* enough room for 8 MAD packets plus header - 17K */
13655 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13656 if (dd->vl15_init > dd->link_credits)
13657 dd->vl15_init = dd->link_credits;
13659 write_uninitialized_csrs_and_memories(dd);
13661 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13662 for (i = 0; i < dd->num_pports; i++) {
13663 struct hfi1_pportdata *ppd = &dd->pport[i];
13665 set_partition_keys(ppd);
13667 init_sc2vl_tables(dd);
13670 static void init_kdeth_qp(struct hfi1_devdata *dd)
13672 /* user changed the KDETH_QP */
13673 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13674 /* out of range or illegal value */
13675 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13678 if (kdeth_qp == 0) /* not set, or failed range check */
13679 kdeth_qp = DEFAULT_KDETH_QP;
13681 write_csr(dd, SEND_BTH_QP,
13682 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13683 SEND_BTH_QP_KDETH_QP_SHIFT);
13685 write_csr(dd, RCV_BTH_QP,
13686 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13687 RCV_BTH_QP_KDETH_QP_SHIFT);
13692 * @dd - device data
13693 * @first_ctxt - first context
13694 * @last_ctxt - last context
13696 * This routine sets the qpn mapping table that
13697 * is indexed by qpn[8:1].
13699 * The routine will round robin the 256 settings
13700 * from first_ctxt to last_ctxt.
13702 * The first/last looks ahead to having specialized
13703 * receive contexts for mgmt and bypass. Normal
13704 * verbs traffic is assumed to be on a range
13705 * of receive contexts.
13707 static void init_qpmap_table(struct hfi1_devdata *dd,
13712 u64 regno = RCV_QP_MAP_TABLE;
13714 u64 ctxt = first_ctxt;
13716 for (i = 0; i < 256; i++) {
13717 reg |= ctxt << (8 * (i % 8));
13719 if (ctxt > last_ctxt)
13722 write_csr(dd, regno, reg);
13728 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13729 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
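/*
 * Map-table sketch for init_qpmap_table() above (context numbers
 * illustrative): the 256 one-byte entries are packed 8 per 64-bit
 * CSR, so 32 CSR writes cover the whole qpn[8:1] index space; with
 * first_ctxt = 1 and last_ctxt = 3 the table holds the repeating
 * round-robin pattern 1, 2, 3, 1, 2, 3, ...
 */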
13732 struct rsm_map_table {
13733 u64 map[NUM_MAP_REGS];
13737 struct rsm_rule_data {
13753 * Return an initialized RMT map table for users to fill in. OK if it
13754 * returns NULL, indicating no table.
13756 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13758 struct rsm_map_table *rmt;
13759 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13761 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13763 memset(rmt->map, rxcontext, sizeof(rmt->map));
13771 * Write the final RMT map table to the chip and free the table. OK if
13772 * the table is NULL, indicating no table.
13774 static void complete_rsm_map_table(struct hfi1_devdata *dd,
13775 struct rsm_map_table *rmt)
13780 /* write table to chip */
13781 for (i = 0; i < NUM_MAP_REGS; i++)
13782 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13785 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13790 * Add a receive side mapping rule.
13792 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13793 struct rsm_rule_data *rrd)
13795 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13796 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13797 1ull << rule_index | /* enable bit */
13798 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13799 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13800 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13801 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13802 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13803 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13804 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13805 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13806 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13807 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13808 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13809 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13810 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13813 /* return the number of RSM map table entries that will be used for QOS */
13814 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13821 /* is QOS active at all? */
13822 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13827 /* determine bits for qpn */
13828 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13829 if (krcvqs[i] > max_by_vl)
13830 max_by_vl = krcvqs[i];
13831 if (max_by_vl > 32)
13833 m = ilog2(__roundup_pow_of_two(max_by_vl));
13835 /* determine bits for vl */
13836 n = ilog2(__roundup_pow_of_two(num_vls));
13838 /* reject if too much is used */
13847 return 1 << (m + n);
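/*
 * Example (hypothetical krcvqs settings): at most 4 kernel receive
 * queues on any VL and 8 VLs in use give m = ilog2(4) = 2 and
 * n = ilog2(8) = 3, so the QOS rule consumes 1 << (2 + 3) = 32 RSM
 * map table entries.
 */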
/**
 * init_qos - init RX qos
 * @dd - device data
 * @rmt - RSM map table
 *
 * This routine initializes Rule 0 and the RSM map table to implement
 * quality of service (qos).
 *
 * If all of the limit tests succeed, qos is applied based on the array
 * interpretation of krcvqs where entry 0 is VL0.
 *
 * The number of vl bits (n) and the number of qpn bits (m) are computed to
 * feed both the RSM map table and the single rule.
 */
static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
{
        struct rsm_rule_data rrd;
        unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
        unsigned int rmt_entries;
        u64 reg;

        if (!rmt)
                goto bail;
        rmt_entries = qos_rmt_entries(dd, &m, &n);
        if (rmt_entries == 0)
                goto bail;
        qpns_per_vl = 1 << m;

        /* enough room in the map table? */
        rmt_entries = 1 << (m + n);
        if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
                goto bail;

        /* add qos entries to the RSM map table */
        for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
                unsigned tctxt;

                for (qpn = 0, tctxt = ctxt;
                     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
                        unsigned idx, regoff, regidx;

                        /* generate the index the hardware will produce */
                        idx = rmt->used + ((qpn << n) ^ i);
                        regoff = (idx % 8) * 8;
                        regidx = idx / 8;
                        /* replace default with context number */
                        reg = rmt->map[regidx];
                        reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
                                << regoff);
                        reg |= (u64)(tctxt++) << regoff;
                        rmt->map[regidx] = reg;
                        if (tctxt == ctxt + krcvqs[i])
                                tctxt = ctxt;
                }
                ctxt += krcvqs[i];
        }

        rrd.offset = rmt->used;
        rrd.pkt_type = 2;
        rrd.field1_off = LRH_BTH_MATCH_OFFSET;
        rrd.field2_off = LRH_SC_MATCH_OFFSET;
        rrd.index1_off = LRH_SC_SELECT_OFFSET;
        rrd.index1_width = n;
        rrd.index2_off = QPN_SELECT_OFFSET;
        rrd.index2_width = m + n;
        rrd.mask1 = LRH_BTH_MASK;
        rrd.value1 = LRH_BTH_VALUE;
        rrd.mask2 = LRH_SC_MASK;
        rrd.value2 = LRH_SC_VALUE;

        /* add rule 0 */
        add_rsm_rule(dd, 0, &rrd);

        /* mark RSM map entries as used */
        rmt->used += rmt_entries;
        /* map everything else to the mcast/err/vl15 context */
        init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
        dd->qos_shift = n + 1;
        return;
bail:
        dd->qos_shift = 1;
        init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}
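/*
 * Illustrative index computation (continuing the m = 2, n = 2 example
 * above, with rmt->used == 0): VL i = 1, qpn = 2 lands at
 * idx = (2 << 2) ^ 1 = 9, giving regidx = 1 and regoff = 8.  The XOR
 * with the VL number interleaves the per-VL entries so consecutive
 * QPNs on different VLs do not collide in the map table.
 */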
static void init_user_fecn_handling(struct hfi1_devdata *dd,
                                    struct rsm_map_table *rmt)
{
        struct rsm_rule_data rrd;
        u64 reg;
        int i, idx, regoff, regidx;
        u8 offset;

        /* there needs to be enough room in the map table */
        if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
                dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
                return;
        }

        /*
         * RSM will extract the destination context as an index into the
         * map table.  The destination contexts are a sequential block
         * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
         * Map entries are accessed as offset + extracted value.  Adjust
         * the added offset so this sequence can be placed anywhere in
         * the table - as long as the entries themselves do not wrap.
         * There are only enough bits in offset for the table size, so
         * start with that to allow for a "negative" offset.
         */
        offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
                      (int)dd->first_user_ctxt);

        for (i = dd->first_user_ctxt, idx = rmt->used;
             i < dd->num_rcv_contexts; i++, idx++) {
                /* replace with identity mapping */
                regoff = (idx % 8) * 8;
                regidx = idx / 8;
                reg = rmt->map[regidx];
                reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
                reg |= (u64)i << regoff;
                rmt->map[regidx] = reg;
        }

        /*
         * For RSM intercept of Expected FECN packets:
         * o packet type 0 - expected
         * o match on F (bit 95), using select/match 1, and
         * o match on SH (bit 133), using select/match 2.
         *
         * Use index 1 to extract the 8-bit receive context from DestQP
         * (start at bit 64).  Use that as the RSM map table index.
         */
        rrd.offset = offset;
        rrd.pkt_type = 0;
        rrd.field1_off = 95;
        rrd.field2_off = 133;
        rrd.index1_off = 64;
        rrd.index1_width = 8;
        rrd.index2_off = 0;
        rrd.index2_width = 0;
        rrd.mask1 = 1;
        rrd.value1 = 1;
        rrd.mask2 = 1;
        rrd.value2 = 1;

        /* add rule 1 */
        add_rsm_rule(dd, 1, &rrd);

        rmt->used += dd->num_user_contexts;
}
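/*
 * Offset arithmetic example (illustrative values): with
 * NUM_MAP_ENTRIES = 256, rmt->used = 20, and first_user_ctxt = 24,
 * offset = (u8)(256 + 20 - 24) = 252.  The hardware adds the 8-bit
 * offset to the extracted context modulo the table size, so extracted
 * context 24 maps to entry (252 + 24) % 256 = 20 - the first entry
 * written by the loop above, i.e. an effective offset of -4.
 */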
static void init_rxe(struct hfi1_devdata *dd)
{
        struct rsm_map_table *rmt;

        /* enable all receive errors */
        write_csr(dd, RCV_ERR_MASK, ~0ull);

        rmt = alloc_rsm_map_table(dd);
        /* set up QOS, including the QPN map table */
        init_qos(dd, rmt);
        init_user_fecn_handling(dd, rmt);
        complete_rsm_map_table(dd, rmt);
        kfree(rmt);

        /*
         * make sure RcvCtrl.RcvWcb <= PCIe Device Control
         * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
         * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
         * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
         * Max_PayLoad_Size set to its minimum of 128.
         *
         * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
         * (64 bytes).  Max_Payload_Size is possibly modified upward in
         * tune_pcie_caps() which is called after this routine.
         */
}
static void init_other(struct hfi1_devdata *dd)
{
        /* enable all CCE errors */
        write_csr(dd, CCE_ERR_MASK, ~0ull);
        /* enable *some* Misc errors */
        write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
        /* enable all DC errors, except LCB */
        write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
        write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
}
/*
 * Fill out the given AU table using the given CU.  A CU is defined in terms
 * of AUs.  The table is an encoding: given the index, how many AUs does that
 * represent?
 *
 * NOTE: Assumes that the register layout is the same for the
 * local and remote tables.
 */
static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
                               u32 csr0to3, u32 csr4to7)
{
        write_csr(dd, csr0to3,
                  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
                  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
                  2ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
                  4ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
        write_csr(dd, csr4to7,
                  8ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
                  16ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
                  32ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
                  64ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
}
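/*
 * Encoding example (a sketch assuming the doubling table written
 * above): the eight entries hold 0, 1, 2*cu, 4*cu, ..., 64*cu AUs.
 * With vcu such that cu == 2, index 3 encodes 4 * 2 = 8 allocation
 * units, so a credit value of 3 on the wire stands for 8 AUs.
 */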
static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
        assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
                           SEND_CM_LOCAL_AU_TABLE4_TO7);
}

void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
        assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
                           SEND_CM_REMOTE_AU_TABLE4_TO7);
}
static void init_txe(struct hfi1_devdata *dd)
{
        int i;

        /* enable all PIO, SDMA, general, and Egress errors */
        write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
        write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
        write_csr(dd, SEND_ERR_MASK, ~0ull);
        write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);

        /* enable all per-context and per-SDMA engine errors */
        for (i = 0; i < dd->chip_send_contexts; i++)
                write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
        for (i = 0; i < dd->chip_sdma_engines; i++)
                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);

        /* set the local CU to AU mapping */
        assign_local_cm_au_table(dd, dd->vcu);

        /*
         * Set reasonable default for Credit Return Timer
         * Don't set on Simulator - causes it to choke.
         */
        if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
                write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
}
int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
{
        struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
        unsigned sctxt;
        int ret = 0;
        u64 reg;

        if (!rcd || !rcd->sc) {
                ret = -EINVAL;
                goto done;
        }
        sctxt = rcd->sc->hw_context;
        reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
                ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
                 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
        /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
        if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
                reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
        /*
         * Enable send-side J_KEY integrity check, unless this is A0 h/w
         */
        if (!is_ax(dd)) {
                reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
                reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
                write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
        }

        /* Enable J_KEY check on receive context. */
        reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
                ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
                 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
        write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
done:
        return ret;
}
int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
{
        struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
        unsigned sctxt;
        int ret = 0;
        u64 reg;

        if (!rcd || !rcd->sc) {
                ret = -EINVAL;
                goto done;
        }
        sctxt = rcd->sc->hw_context;
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
        /*
         * Disable send-side J_KEY integrity check, unless this is A0 h/w.
         * This check would not have been enabled for A0 h/w, see
         * set_ctxt_jkey().
         */
        if (!is_ax(dd)) {
                reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
                reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
                write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
        }
        /* Turn off the J_KEY on the receive side */
        write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
done:
        return ret;
}
int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
{
        struct hfi1_ctxtdata *rcd;
        unsigned sctxt;
        int ret = 0;
        u64 reg;

        if (ctxt < dd->num_rcv_contexts) {
                rcd = dd->rcd[ctxt];
        } else {
                ret = -EINVAL;
                goto done;
        }
        if (!rcd || !rcd->sc) {
                ret = -EINVAL;
                goto done;
        }
        sctxt = rcd->sc->hw_context;
        reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
                SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
        reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
        reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
        reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
done:
        return ret;
}
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
{
        struct hfi1_ctxtdata *rcd;
        unsigned sctxt;
        int ret = 0;
        u64 reg;

        if (ctxt < dd->num_rcv_contexts) {
                rcd = dd->rcd[ctxt];
        } else {
                ret = -EINVAL;
                goto done;
        }
        if (!rcd || !rcd->sc) {
                ret = -EINVAL;
                goto done;
        }
        sctxt = rcd->sc->hw_context;
        reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
        reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
done:
        return ret;
}
/*
 * Start doing the clean up of the chip. Our clean up happens in multiple
 * stages and this is just the first.
 */
void hfi1_start_cleanup(struct hfi1_devdata *dd)
{
        aspm_exit(dd);
        free_cntrs(dd);
        free_rcverr(dd);
        clean_up_interrupts(dd);
        finish_chip_resources(dd);
}
#define HFI_BASE_GUID(dev) \
        ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
/*
 * Information can be shared between the two HFIs on the same ASIC
 * in the same OS.  This function finds the peer device and sets
 * up a shared structure.
 */
static int init_asic_data(struct hfi1_devdata *dd)
{
        unsigned long flags;
        struct hfi1_devdata *tmp, *peer = NULL;
        struct hfi1_asic_data *asic_data;
        int ret = 0;

        /* pre-allocate the asic structure in case we are the first device */
        asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
        if (!asic_data)
                return -ENOMEM;

        spin_lock_irqsave(&hfi1_devs_lock, flags);
        /* Find our peer device */
        list_for_each_entry(tmp, &hfi1_dev_list, list) {
                if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
                    dd->unit != tmp->unit) {
                        peer = tmp;
                        break;
                }
        }

        if (peer) {
                /* use already allocated structure */
                dd->asic_data = peer->asic_data;
                kfree(asic_data);
        } else {
                dd->asic_data = asic_data;
                mutex_init(&dd->asic_data->asic_resource_mutex);
        }
        dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);

        /* first one through - set up i2c devices */
        if (!peer)
                ret = set_up_i2c(dd, dd->asic_data);

        return ret;
}
/*
 * Set dd->boardname.  Use a generic name if a name is not returned from
 * EFI variable space.
 *
 * Return 0 on success, -ENOMEM if space could not be allocated.
 */
static int obtain_boardname(struct hfi1_devdata *dd)
{
        /* generic board description */
        const char generic[] =
                "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
        unsigned long size;
        int ret;

        ret = read_hfi1_efi_var(dd, "description", &size,
                                (void **)&dd->boardname);
        if (ret) {
                dd_dev_info(dd, "Board description not found\n");
                /* use generic description */
                dd->boardname = kstrdup(generic, GFP_KERNEL);
                if (!dd->boardname)
                        return -ENOMEM;
        }
        return 0;
}
/*
 * Check the interrupt registers to make sure that they are mapped correctly.
 * It is intended to help user identify any mismapping by VMM when the driver
 * is running in a VM.  This function should only be called before interrupt
 * is set up properly.
 *
 * Return 0 on success, -EINVAL on failure.
 */
static int check_int_registers(struct hfi1_devdata *dd)
{
        u64 reg;
        u64 all_bits = ~(u64)0;
        u64 mask;

        /* Clear CceIntMask[0] to avoid raising any interrupts */
        mask = read_csr(dd, CCE_INT_MASK);
        write_csr(dd, CCE_INT_MASK, 0ull);
        reg = read_csr(dd, CCE_INT_MASK);
        if (reg)
                goto err_exit;

        /* Clear all interrupt status bits */
        write_csr(dd, CCE_INT_CLEAR, all_bits);
        reg = read_csr(dd, CCE_INT_STATUS);
        if (reg)
                goto err_exit;

        /* Set all interrupt status bits */
        write_csr(dd, CCE_INT_FORCE, all_bits);
        reg = read_csr(dd, CCE_INT_STATUS);
        if (reg != all_bits)
                goto err_exit;

        /* Restore the interrupt mask */
        write_csr(dd, CCE_INT_CLEAR, all_bits);
        write_csr(dd, CCE_INT_MASK, mask);

        return 0;
err_exit:
        write_csr(dd, CCE_INT_MASK, mask);
        dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
        return -EINVAL;
}
/**
 * Allocate and initialize the device structure for the hfi.
 * @dev: the pci_dev for hfi1_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, initializes, and returns the devdata struct for this
 * device.
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        u64 reg;
        int i, ret;
        static const char * const inames[] = { /* implementation names */
                "RTL silicon",
                "RTL VCS simulation",
                "RTL FPGA emulation",
                "Functional simulator"
        };
        struct pci_dev *parent = pdev->bus->self;

        dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
                                sizeof(struct hfi1_pportdata));
        if (IS_ERR(dd))
                goto bail;
        ppd = dd->pport;
        for (i = 0; i < dd->num_pports; i++, ppd++) {
                int vl;
                /* init common fields */
                hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
                /* DC supports 4 link widths */
                ppd->link_width_supported =
                        OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
                        OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
                ppd->link_width_downgrade_supported =
                        ppd->link_width_supported;
                /* start out enabling only 4X */
                ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
                ppd->link_width_downgrade_enabled =
                        ppd->link_width_downgrade_supported;
                /* link width active is 0 when link is down */
                /* link width downgrade active is 0 when link is down */

                if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
                    num_vls > HFI1_MAX_VLS_SUPPORTED) {
                        hfi1_early_err(&pdev->dev,
                                       "Invalid num_vls %u, using %u VLs\n",
                                       num_vls, HFI1_MAX_VLS_SUPPORTED);
                        num_vls = HFI1_MAX_VLS_SUPPORTED;
                }
                ppd->vls_supported = num_vls;
                ppd->vls_operational = ppd->vls_supported;
                ppd->actual_vls_operational = ppd->vls_supported;
                /* Set the default MTU. */
                for (vl = 0; vl < num_vls; vl++)
                        dd->vld[vl].mtu = hfi1_max_mtu;
                dd->vld[15].mtu = MAX_MAD_PACKET;
                /*
                 * Set the initial values to reasonable default, will be set
                 * for real when link is up.
                 */
                ppd->lstate = IB_PORT_DOWN;
                ppd->overrun_threshold = 0x4;
                ppd->phy_error_threshold = 0xf;
                ppd->port_crc_mode_enabled = link_crc_mask;
                /* initialize supported LTP CRC mode */
                ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
                /* initialize enabled LTP CRC mode */
                ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
                /* start in offline */
                ppd->host_link_state = HLS_DN_OFFLINE;
                init_vl_arb_caches(ppd);
                ppd->last_pstate = 0xff; /* invalid value */
        }

        dd->link_default = HLS_DN_POLL;
        /*
         * Do remaining PCIe setup and save PCIe values in dd.
         * Any error printing is already done by the init code.
         * On return, we have the chip mapped.
         */
        ret = hfi1_pcie_ddinit(dd, pdev);
        if (ret < 0)
                goto bail_free;

        /* verify that reads actually work, save revision for reset check */
        dd->revision = read_csr(dd, CCE_REVISION);
        if (dd->revision == ~(u64)0) {
                dd_dev_err(dd, "cannot read chip CSRs\n");
                ret = -EINVAL;
                goto bail_cleanup;
        }
        dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
                        & CCE_REVISION_CHIP_REV_MAJOR_MASK;
        dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
        /*
         * Check interrupt registers mapping if the driver has no access to
         * the upstream component. In this case, it is likely that the driver
         * is running in a VM.
         */
        if (!parent) {
                ret = check_int_registers(dd);
                if (ret)
                        goto bail_cleanup;
        }
        /*
         * obtain the hardware ID - NOT related to unit, which is a
         * software enumeration
         */
        reg = read_csr(dd, CCE_REVISION2);
        dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
                                        & CCE_REVISION2_HFI_ID_MASK;
        /* the variable size will remove unwanted bits */
        dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
        dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
        dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
                    dd->icode < ARRAY_SIZE(inames) ?
                    inames[dd->icode] : "unknown", (int)dd->irev);
        /* speeds the hardware can support */
        dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
        /* speeds allowed to run at */
        dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
        /* give a reasonable active value, will be set on link up */
        dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
        dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
        dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
        dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
        dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
        dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
        /* fix up link widths for emulation _p */
        ppd = dd->pport;
        if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
                ppd->link_width_supported =
                        ppd->link_width_enabled =
                        ppd->link_width_downgrade_supported =
                        ppd->link_width_downgrade_enabled =
                                OPA_LINK_WIDTH_1X;
        }
        /* ensure num_vls isn't larger than number of sdma engines */
        if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
                dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
                           num_vls, dd->chip_sdma_engines);
                num_vls = dd->chip_sdma_engines;
                ppd->vls_supported = dd->chip_sdma_engines;
                ppd->vls_operational = ppd->vls_supported;
        }
        /*
         * Convert the ns parameter to the 64 * cclocks used in the CSR.
         * Limit the max if larger than the field holds.  If timeout is
         * non-zero, then the calculated field will be at least 1.
         *
         * Must be after icode is set up - the cclock rate depends
         * on knowing the hardware being used.
         */
        dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
        if (dd->rcv_intr_timeout_csr >
                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
                dd->rcv_intr_timeout_csr =
                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
        else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
                dd->rcv_intr_timeout_csr = 1;
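        /*
         * Conversion example (illustrative, assuming a hypothetical 1.25 ns
         * cclock purely for the arithmetic): rcv_intr_timeout = 840 ns is
         * ns_to_cclock(dd, 840) = 672 cclocks, so the CSR field becomes
         * 672 / 64 = 10 units of 64 cclocks.  A nonzero timeout that would
         * round down to 0 is bumped to 1 so mitigation is never silently
         * disabled.
         */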
        /* needs to be done before we look for the peer device */
        read_guid(dd);

        /* set up shared ASIC data with peer device */
        ret = init_asic_data(dd);
        if (ret)
                goto bail_cleanup;
        /* obtain chip sizes, reset chip CSRs */
        init_chip(dd);

        /* read in the PCIe link speed information */
        ret = pcie_speeds(dd);
        if (ret)
                goto bail_cleanup;

        /* call before get_platform_config(), after init_chip_resources() */
        ret = eprom_init(dd);
        if (ret)
                goto bail_free_rcverr;

        /* Needs to be called before hfi1_firmware_init */
        get_platform_config(dd);

        /* read in firmware */
        ret = hfi1_firmware_init(dd);
        if (ret)
                goto bail_cleanup;
        /*
         * In general, the PCIe Gen3 transition must occur after the
         * chip has been idled (so it won't initiate any PCIe transactions
         * e.g. an interrupt) and before the driver changes any registers
         * (the transition will reset the registers).
         *
         * In particular, place this call after:
         * - init_chip()     - the chip will not initiate any PCIe transactions
         * - pcie_speeds()   - reads the current link speed
         * - hfi1_firmware_init() - the needed firmware is ready to be
         *                          downloaded
         */
        ret = do_pcie_gen3_transition(dd);
        if (ret)
                goto bail_cleanup;

        /* start setting dd values and adjusting CSRs */
        init_early_variables(dd);
        parse_platform_config(dd);
        ret = obtain_boardname(dd);
        if (ret)
                goto bail_cleanup;

        snprintf(dd->boardversion, BOARD_VERS_MAX,
                 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
                 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
                 (u32)dd->majrev,
                 (u32)dd->minrev,
                 (dd->revision >> CCE_REVISION_SW_SHIFT)
                    & CCE_REVISION_SW_MASK);
        ret = set_up_context_variables(dd);
        if (ret)
                goto bail_cleanup;

        /* set initial RXE CSRs */
        init_rxe(dd);
        /* set initial TXE CSRs */
        init_txe(dd);
        /* set initial non-RXE, non-TXE CSRs */
        init_other(dd);
        /* set up KDETH QP prefix in both RX and TX CSRs */
        init_kdeth_qp(dd);

        ret = hfi1_dev_affinity_init(dd);
        if (ret)
                goto bail_cleanup;
        /* send contexts must be set up before receive contexts */
        ret = init_send_contexts(dd);
        if (ret)
                goto bail_cleanup;

        ret = hfi1_create_ctxts(dd);
        if (ret)
                goto bail_cleanup;
        dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
        /*
         * rcd[0] is guaranteed to be valid by this point. Also, all
         * contexts are using the same value, as per the module parameter.
         */
        dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
        ret = init_pervl_scs(dd);
        if (ret)
                goto bail_cleanup;

        /* sdma init */
        for (i = 0; i < dd->num_pports; ++i) {
                ret = sdma_init(dd, i);
                if (ret)
                        goto bail_cleanup;
        }

        /* use contexts created by hfi1_create_ctxts */
        ret = set_up_interrupts(dd);
        if (ret)
                goto bail_cleanup;
        /* set up LCB access - must be after set_up_interrupts() */
        init_lcb_access(dd);

        /*
         * Serial number is created from the base guid:
         * [27:24] = base guid [38:35]
         * [23: 0] = base guid [23: 0]
         */
        snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
                 (dd->base_guid & 0xFFFFFF) |
                 ((dd->base_guid >> 11) & 0xF000000));

        dd->oui1 = dd->base_guid >> 56 & 0xFF;
        dd->oui2 = dd->base_guid >> 48 & 0xFF;
        dd->oui3 = dd->base_guid >> 40 & 0xFF;
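        /*
         * Serial number example (illustrative GUID): for
         * base_guid = 0x0011223344556677, bits [23:0] contribute 0x556677
         * and ((base_guid >> 11) & 0xF000000) moves guid bits [38:35] into
         * serial bits [27:24], here yielding 0x6000000, so dd->serial
         * reads "0x06556677".
         */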
        ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
        if (ret)
                goto bail_clear_intr;

        thermal_init(dd);

        ret = init_cntrs(dd);
        if (ret)
                goto bail_clear_intr;

        ret = init_rcverr(dd);
        if (ret)
                goto bail_free_cntrs;

        init_completion(&dd->user_comp);

        /* The user refcount starts with one to indicate an active device */
        atomic_set(&dd->user_refcount, 1);

        goto bail;

bail_free_rcverr:
        free_rcverr(dd);
bail_free_cntrs:
        free_cntrs(dd);
bail_clear_intr:
        clean_up_interrupts(dd);
bail_cleanup:
        hfi1_pcie_ddcleanup(dd);
bail_free:
        hfi1_free_devdata(dd);
        dd = ERR_PTR(ret);
bail:
        return dd;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
                        u32 dw_len)
{
        u32 delta_cycles;
        u32 current_egress_rate = ppd->current_egress_rate;
        /* rates here are in units of 10^6 bits/sec */

        if (desired_egress_rate == -1)
                return 0; /* shouldn't happen */

        if (desired_egress_rate >= current_egress_rate)
                return 0; /* we can't help go faster, only slower */

        delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
                       egress_cycles(dw_len * 4, current_egress_rate);

        return (u16)delta_cycles;
}
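/*
 * Rate-delay example (illustrative numbers): for a 4096-byte packet
 * (dw_len = 1024) on a link currently egressing at 100 Gb/s
 * (current_egress_rate = 100000) with a desired static rate of
 * 25 Gb/s (25000), the return value is the cycle count
 * egress_cycles(4096, 25000) - egress_cycles(4096, 100000), i.e. the
 * extra time the packet must be held back to mimic the slower rate.
 */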
/**
 * create_pbc - build a pbc for transmission
 * @flags: special case flags or-ed in built pbc
 * @srate: static rate
 * @vl: vl
 * @dwlen: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature.  The lone other caller
 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
               u32 dw_len)
{
        u64 pbc, delay = 0;

        if (unlikely(srate_mbs))
                delay = delay_cycles(ppd, srate_mbs, dw_len);

        pbc = flags
                | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
                | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
                | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
                | (dw_len & PBC_LENGTH_DWS_MASK)
                        << PBC_LENGTH_DWS_SHIFT;

        return pbc;
}
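/*
 * Usage sketch (hypothetical arguments): a verbs caller sending a
 * 64-dword packet on VL 0 with no special flags and no static rate
 * might build its PBC as
 *
 *      u64 pbc = create_pbc(ppd, 0, 0, 0, 64);
 *
 * With srate_mbs == 0 the delay term stays 0, HCRC insertion is
 * disabled via PBC_IHCRC_NONE, and only the VL and length fields are
 * populated.
 */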
#define SBUS_THERMAL    0x4f
#define SBUS_THERM_MONITOR_MODE 0x1

#define THERM_FAILURE(dev, ret, reason) \
        dd_dev_err((dd),                                                \
                   "Thermal sensor initialization failed: %s (%d)\n",  \
                   (reason), (ret))
/*
 * Initialize the thermal sensor.
 *
 * After initialization, enable polling of thermal sensor through
 * SBus interface. In order for this to work, the SBus Master
 * firmware has to be loaded due to the fact that the HW polling
 * logic uses SBus interrupts, which are not supported with
 * default firmware. Otherwise, no data will be returned through
 * the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
        int ret = 0;

        if (dd->icode != ICODE_RTL_SILICON ||
            check_chip_resource(dd, CR_THERM_INIT, NULL))
                return ret;

        ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
        if (ret) {
                THERM_FAILURE(dd, ret, "Acquire SBus");
                return ret;
        }

        dd_dev_info(dd, "Initializing thermal sensor\n");
        /* Disable polling of thermal readings */
        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
        msleep(100);
        /* Thermal Sensor Initialization */
        /*    Step 1: Reset the Thermal SBus Receiver */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
                                RESET_SBUS_RECEIVER, 0);
        if (ret) {
                THERM_FAILURE(dd, ret, "Bus Reset");
                goto done;
        }
        /*    Step 2: Set Reset bit in Thermal block */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
                                WRITE_SBUS_RECEIVER, 0x1);
        if (ret) {
                THERM_FAILURE(dd, ret, "Therm Block Reset");
                goto done;
        }
        /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
                                WRITE_SBUS_RECEIVER, 0x32);
        if (ret) {
                THERM_FAILURE(dd, ret, "Write Clock Div");
                goto done;
        }
        /*    Step 4: Select temperature mode */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
                                WRITE_SBUS_RECEIVER,
                                SBUS_THERM_MONITOR_MODE);
        if (ret) {
                THERM_FAILURE(dd, ret, "Write Mode Sel");
                goto done;
        }
        /*    Step 5: De-assert block reset and start conversion */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
                                WRITE_SBUS_RECEIVER, 0x2);
        if (ret) {
                THERM_FAILURE(dd, ret, "Write Reset Deassert");
                goto done;
        }
        /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
        msleep(22);

        /* Enable polling of thermal readings */
        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);

        /* Set initialized flag */
        ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
        if (ret)
                THERM_FAILURE(dd, ret, "Unable to set thermal init flag");

done:
        release_chip_resource(dd, CR_SBUS);
        return ret;
}
static void handle_temp_err(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd = &dd->pport[0];
        /*
         * Thermal Critical Interrupt
         * Put the device into forced freeze mode, take link down to
         * offline, and put DC into reset.
         */
        dd_dev_emerg(dd,
                     "Critical temperature reached! Forcing device into freeze mode!\n");
        dd->flags |= HFI1_FORCED_FREEZE;
        start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
        /*
         * Shut DC down as much and as quickly as possible.
         *
         * Step 1: Take the link down to OFFLINE. This will cause the
         *         8051 to put the Serdes in reset. However, we don't want to
         *         go through the entire link state machine since we want to
         *         shutdown ASAP. Furthermore, this is not a graceful shutdown
         *         but rather an attempt to save the chip.
         *         Code below is almost the same as quiet_serdes() but avoids
         *         all the extra work and the sleeps.
         */
        ppd->driver_link_ready = 0;
        ppd->link_enabled = 0;
        set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
                                PLS_OFFLINE);
        /*
         * Step 2: Shutdown LCB and 8051
         *         After shutdown, do not restore DC_CFG_RESET value.
         */
        dc_shutdown(dd);
}