drivers/infiniband/hw/hfi1/chip.c (GNU Linux-libre 4.19.286-gnu1)
1 /*
2  * Copyright(c) 2015 - 2018 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47
48 /*
49  * This file contains all of the code that is specific to the HFI chip
50  */
51
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56
57 #include "hfi.h"
58 #include "trace.h"
59 #include "mad.h"
60 #include "pio.h"
61 #include "sdma.h"
62 #include "eprom.h"
63 #include "efivar.h"
64 #include "platform.h"
65 #include "aspm.h"
66 #include "affinity.h"
67 #include "debugfs.h"
68 #include "fault.h"
69
70 #define NUM_IB_PORTS 1
71
72 uint kdeth_qp;
73 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
74 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
75
76 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
77 module_param(num_vls, uint, S_IRUGO);
78 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
79
80 /*
81  * Default time to aggregate two 10K packets from the idle state
82  * (timer not running). The timer starts at the end of the first packet,
83  * so only the time for one 10K packet and header plus a bit extra is needed.
84  * 10 * 1024 + 64 header bytes = 10304 bytes
85  * 10304 bytes / 12.5 GB/s = 824.32 ns
86  */
87 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
88 module_param(rcv_intr_timeout, uint, S_IRUGO);
89 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
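/*
 * Putting the numbers above together: 10 * 1024 + 64 = 10304 bytes, which
 * takes 10304 / 12.5 = 824.32 ns at 12.5 GB/s, so the default works out to
 * 824 + 16 = 840 ns once the interrupt coalescing allowance is included.
 */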
90
91 uint rcv_intr_count = 16; /* same as qib */
92 module_param(rcv_intr_count, uint, S_IRUGO);
93 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
94
95 ushort link_crc_mask = SUPPORTED_CRCS;
96 module_param(link_crc_mask, ushort, S_IRUGO);
97 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
98
99 uint loopback;
100 module_param_named(loopback, loopback, uint, S_IRUGO);
101 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
102
103 /* Other driver tunables */
104 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
105 static ushort crc_14b_sideband = 1;
106 static uint use_flr = 1;
107 uint quick_linkup; /* skip LNI */
108
109 struct flag_table {
110         u64 flag;       /* the flag */
111         char *str;      /* description string */
112         u16 extra;      /* extra information */
113         u16 unused0;
114         u32 unused1;
115 };
116
117 /* str must be a string constant */
118 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
119 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
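/*
 * For illustration, an entry such as
 *   FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * simply expands to the initializer
 *   { CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 },
 * pairing a status register bit mask with its printable name.
 */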
120
121 /* Send Error Consequences */
122 #define SEC_WRITE_DROPPED       0x1
123 #define SEC_PACKET_DROPPED      0x2
124 #define SEC_SC_HALTED           0x4     /* per-context only */
125 #define SEC_SPC_FREEZE          0x8     /* per-HFI only */
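/*
 * These consequence bits are OR'd together in the "extra" field of a
 * flag_table entry; for example, pio_err_status_flags below marks
 * "PioWriteQwValidParity" with SEC_WRITE_DROPPED | SEC_SPC_FREEZE.
 */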
126
127 #define DEFAULT_KRCVQS            2
128 #define MIN_KERNEL_KCTXTS         2
129 #define FIRST_KERNEL_KCTXT        1
130
131 /*
132  * RSM instance allocation
133  *   0 - Verbs
134  *   1 - User Fecn Handling
135  *   2 - Vnic
136  */
137 #define RSM_INS_VERBS             0
138 #define RSM_INS_FECN              1
139 #define RSM_INS_VNIC              2
140
141 /* Bit offset into the GUID which carries HFI id information */
142 #define GUID_HFI_INDEX_SHIFT     39
143
144 /* extract the emulation revision */
145 #define emulator_rev(dd) ((dd)->irev >> 8)
146 /* parallel and serial emulation versions are 3 and 4 respectively */
147 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
148 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
149
150 /* RSM fields for Verbs */
151 /* packet type */
152 #define IB_PACKET_TYPE         2ull
153 #define QW_SHIFT               6ull
154 /* QPN[7..1] */
155 #define QPN_WIDTH              7ull
156
157 /* LRH.BTH: QW 0, OFFSET 48 - for match */
158 #define LRH_BTH_QW             0ull
159 #define LRH_BTH_BIT_OFFSET     48ull
160 #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
161 #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
162 #define LRH_BTH_SELECT
163 #define LRH_BTH_MASK           3ull
164 #define LRH_BTH_VALUE          2ull
165
166 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
167 #define LRH_SC_QW              0ull
168 #define LRH_SC_BIT_OFFSET      56ull
169 #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
170 #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
171 #define LRH_SC_MASK            128ull
172 #define LRH_SC_VALUE           0ull
173
174 /* SC[n..0] QW 0, OFFSET 60 - for select */
175 #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
176
177 /* QPN[m+n:1] QW 1, OFFSET 1 */
178 #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
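/*
 * Each of these offsets combines a quad-word index and a bit offset as
 * (QW << QW_SHIFT) | bit, i.e. QW * 64 + bit.  For example,
 *   LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48 and
 *   QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65.
 */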
179
180 /* RSM fields for Vnic */
181 /* L2_TYPE: QW 0, OFFSET 61 - for match */
182 #define L2_TYPE_QW             0ull
183 #define L2_TYPE_BIT_OFFSET     61ull
184 #define L2_TYPE_OFFSET(off)    ((L2_TYPE_QW << QW_SHIFT) | (off))
185 #define L2_TYPE_MATCH_OFFSET   L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
186 #define L2_TYPE_MASK           3ull
187 #define L2_16B_VALUE           2ull
188
189 /* L4_TYPE QW 1, OFFSET 0 - for match */
190 #define L4_TYPE_QW              1ull
191 #define L4_TYPE_BIT_OFFSET      0ull
192 #define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
193 #define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
194 #define L4_16B_TYPE_MASK        0xFFull
195 #define L4_16B_ETH_VALUE        0x78ull
196
197 /* 16B VESWID - for select */
198 #define L4_16B_HDR_VESWID_OFFSET  ((2 << QW_SHIFT) | (16ull))
199 /* 16B ENTROPY - for select */
200 #define L2_16B_ENTROPY_OFFSET     ((1 << QW_SHIFT) | (32ull))
201
202 /* defines to build power on SC2VL table */
203 #define SC2VL_VAL( \
204         num, \
205         sc0, sc0val, \
206         sc1, sc1val, \
207         sc2, sc2val, \
208         sc3, sc3val, \
209         sc4, sc4val, \
210         sc5, sc5val, \
211         sc6, sc6val, \
212         sc7, sc7val) \
213 ( \
214         ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
215         ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
216         ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
217         ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
218         ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
219         ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
220         ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
221         ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
222 )
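/*
 * As a sketch of how this is used (the SC-to-VL numbers here are purely
 * illustrative, not the driver's actual table), an invocation such as
 *   SC2VL_VAL(0, 0, 0, 1, 0, 2, 1, 3, 1, 4, 2, 5, 2, 6, 3, 7, 3)
 * expands to
 *   ((u64)0 << SEND_SC2VLT0_SC0_SHIFT) | ((u64)0 << SEND_SC2VLT0_SC1_SHIFT) |
 *   ... | ((u64)3 << SEND_SC2VLT0_SC7_SHIFT),
 * i.e. one register value with each SC's VL packed at its own shift.
 */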
223
224 #define DC_SC_VL_VAL( \
225         range, \
226         e0, e0val, \
227         e1, e1val, \
228         e2, e2val, \
229         e3, e3val, \
230         e4, e4val, \
231         e5, e5val, \
232         e6, e6val, \
233         e7, e7val, \
234         e8, e8val, \
235         e9, e9val, \
236         e10, e10val, \
237         e11, e11val, \
238         e12, e12val, \
239         e13, e13val, \
240         e14, e14val, \
241         e15, e15val) \
242 ( \
243         ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
244         ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
245         ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
246         ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
247         ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
248         ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
249         ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
250         ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
251         ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
252         ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
253         ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
254         ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
255         ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
256         ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
257         ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
258         ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
259 )
260
261 /* all CceStatus sub-block freeze bits */
262 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
263                         | CCE_STATUS_RXE_FROZE_SMASK \
264                         | CCE_STATUS_TXE_FROZE_SMASK \
265                         | CCE_STATUS_TXE_PIO_FROZE_SMASK)
266 /* all CceStatus sub-block TXE pause bits */
267 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
268                         | CCE_STATUS_TXE_PAUSED_SMASK \
269                         | CCE_STATUS_SDMA_PAUSED_SMASK)
270 /* all CceStatus sub-block RXE pause bits */
271 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
272
273 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
274 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
275
276 /*
277  * CCE Error flags.
278  */
279 static struct flag_table cce_err_status_flags[] = {
280 /* 0*/  FLAG_ENTRY0("CceCsrParityErr",
281                 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
282 /* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
283                 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
284 /* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
285                 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
286 /* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
287                 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
288 /* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
289                 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
290 /* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
291                 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
292 /* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
293                 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
294 /* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
295                 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
296 /* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
297                 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
298 /* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
299             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
300 /*10*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
301             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
302 /*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
303             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
304 /*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
305                 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
306 /*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
307                 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
308 /*14*/  FLAG_ENTRY0("PcicRetryMemCorErr",
309                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
310 /*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
311                 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
312 /*16*/  FLAG_ENTRY0("PcicPostHdQCorErr",
313                 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
314 /*17*/  FLAG_ENTRY0("PcicPostHdQCorErr",
315                 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
316 /*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
317                 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
318 /*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
319                 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
320 /*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
321                 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
322 /*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
323                 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
324 /*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
325                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
326 /*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
327                 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
328 /*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
329                 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
330 /*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
331                 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
332 /*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
333                 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
334 /*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
335                 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
336 /*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
337                 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
338 /*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
339                 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
340 /*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
341                 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
342 /*31*/  FLAG_ENTRY0("LATriggered",
343                 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
344 /*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
345                 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
346 /*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
347                 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
348 /*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
349                 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
350 /*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
351                 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
352 /*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
353                 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
354 /*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
355                 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
356 /*38*/  FLAG_ENTRY0("CceIntMapCorErr",
357                 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
358 /*39*/  FLAG_ENTRY0("CceIntMapUncErr",
359                 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
360 /*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
361                 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
362 /*41-63 reserved*/
363 };
364
365 /*
366  * Misc Error flags
367  */
368 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
369 static struct flag_table misc_err_status_flags[] = {
370 /* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
371 /* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
372 /* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
373 /* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
374 /* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
375 /* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
376 /* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
377 /* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
378 /* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
379 /* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
380 /*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
381 /*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
382 /*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
383 };
384
385 /*
386  * TXE PIO Error flags and consequences
387  */
388 static struct flag_table pio_err_status_flags[] = {
389 /* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
390         SEC_WRITE_DROPPED,
391         SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
392 /* 1*/  FLAG_ENTRY("PioWriteAddrParity",
393         SEC_SPC_FREEZE,
394         SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
395 /* 2*/  FLAG_ENTRY("PioCsrParity",
396         SEC_SPC_FREEZE,
397         SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
398 /* 3*/  FLAG_ENTRY("PioSbMemFifo0",
399         SEC_SPC_FREEZE,
400         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
401 /* 4*/  FLAG_ENTRY("PioSbMemFifo1",
402         SEC_SPC_FREEZE,
403         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
404 /* 5*/  FLAG_ENTRY("PioPccFifoParity",
405         SEC_SPC_FREEZE,
406         SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
407 /* 6*/  FLAG_ENTRY("PioPecFifoParity",
408         SEC_SPC_FREEZE,
409         SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
410 /* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
411         SEC_SPC_FREEZE,
412         SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
413 /* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
414         SEC_SPC_FREEZE,
415         SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
416 /* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
417         SEC_SPC_FREEZE,
418         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
419 /*10*/  FLAG_ENTRY("PioSmPktResetParity",
420         SEC_SPC_FREEZE,
421         SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
422 /*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
423         SEC_SPC_FREEZE,
424         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
425 /*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
426         SEC_SPC_FREEZE,
427         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
428 /*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
429         0,
430         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
431 /*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
432         0,
433         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
434 /*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
435         SEC_SPC_FREEZE,
436         SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
437 /*16*/  FLAG_ENTRY("PioPpmcPblFifo",
438         SEC_SPC_FREEZE,
439         SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
440 /*17*/  FLAG_ENTRY("PioInitSmIn",
441         0,
442         SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
443 /*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
444         SEC_SPC_FREEZE,
445         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
446 /*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
447         SEC_SPC_FREEZE,
448         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
449 /*20*/  FLAG_ENTRY("PioHostAddrMemCor",
450         0,
451         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
452 /*21*/  FLAG_ENTRY("PioWriteDataParity",
453         SEC_SPC_FREEZE,
454         SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
455 /*22*/  FLAG_ENTRY("PioStateMachine",
456         SEC_SPC_FREEZE,
457         SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
458 /*23*/  FLAG_ENTRY("PioWriteQwValidParity",
459         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
460         SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
461 /*24*/  FLAG_ENTRY("PioBlockQwCountParity",
462         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
463         SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
464 /*25*/  FLAG_ENTRY("PioVlfVlLenParity",
465         SEC_SPC_FREEZE,
466         SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
467 /*26*/  FLAG_ENTRY("PioVlfSopParity",
468         SEC_SPC_FREEZE,
469         SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
470 /*27*/  FLAG_ENTRY("PioVlFifoParity",
471         SEC_SPC_FREEZE,
472         SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
473 /*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
474         SEC_SPC_FREEZE,
475         SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
476 /*29*/  FLAG_ENTRY("PioPpmcSopLen",
477         SEC_SPC_FREEZE,
478         SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
479 /*30-31 reserved*/
480 /*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
481         SEC_SPC_FREEZE,
482         SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
483 /*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
484         SEC_SPC_FREEZE,
485         SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
486 /*34*/  FLAG_ENTRY("PioPccSopHeadParity",
487         SEC_SPC_FREEZE,
488         SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
489 /*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
490         SEC_SPC_FREEZE,
491         SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
492 /*36-63 reserved*/
493 };
494
495 /* TXE PIO errors that cause an SPC freeze */
496 #define ALL_PIO_FREEZE_ERR \
497         (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
498         | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
499         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
500         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
501         | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
502         | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
503         | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
504         | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
505         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
506         | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
507         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
508         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
509         | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
510         | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
511         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
512         | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
513         | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
514         | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
515         | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
516         | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
517         | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
518         | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
519         | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
520         | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
521         | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
522         | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
523         | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
524         | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
525         | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
526
527 /*
528  * TXE SDMA Error flags
529  */
530 static struct flag_table sdma_err_status_flags[] = {
531 /* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
532                 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
533 /* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
534                 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
535 /* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
536                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
537 /* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
538                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
539 /*04-63 reserved*/
540 };
541
542 /* TXE SDMA errors that cause an SPC freeze */
543 #define ALL_SDMA_FREEZE_ERR  \
544                 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
545                 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
546                 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
547
548 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
549 #define PORT_DISCARD_EGRESS_ERRS \
550         (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
551         | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
552         | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
553
554 /*
555  * TXE Egress Error flags
556  */
557 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
558 static struct flag_table egress_err_status_flags[] = {
559 /* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
560 /* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
561 /* 2 reserved */
562 /* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
563                 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
564 /* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
565 /* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
566 /* 6 reserved */
567 /* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
568                 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
569 /* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
570                 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
571 /* 9-10 reserved */
572 /*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
573                 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
574 /*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
575 /*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
576 /*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
577 /*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
578 /*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
579                 SEES(TX_SDMA0_DISALLOWED_PACKET)),
580 /*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
581                 SEES(TX_SDMA1_DISALLOWED_PACKET)),
582 /*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
583                 SEES(TX_SDMA2_DISALLOWED_PACKET)),
584 /*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
585                 SEES(TX_SDMA3_DISALLOWED_PACKET)),
586 /*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
587                 SEES(TX_SDMA4_DISALLOWED_PACKET)),
588 /*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
589                 SEES(TX_SDMA5_DISALLOWED_PACKET)),
590 /*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
591                 SEES(TX_SDMA6_DISALLOWED_PACKET)),
592 /*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
593                 SEES(TX_SDMA7_DISALLOWED_PACKET)),
594 /*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
595                 SEES(TX_SDMA8_DISALLOWED_PACKET)),
596 /*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
597                 SEES(TX_SDMA9_DISALLOWED_PACKET)),
598 /*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
599                 SEES(TX_SDMA10_DISALLOWED_PACKET)),
600 /*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
601                 SEES(TX_SDMA11_DISALLOWED_PACKET)),
602 /*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
603                 SEES(TX_SDMA12_DISALLOWED_PACKET)),
604 /*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
605                 SEES(TX_SDMA13_DISALLOWED_PACKET)),
606 /*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
607                 SEES(TX_SDMA14_DISALLOWED_PACKET)),
608 /*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
609                 SEES(TX_SDMA15_DISALLOWED_PACKET)),
610 /*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
611                 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
612 /*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
613                 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
614 /*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
615                 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
616 /*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
617                 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
618 /*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
619                 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
620 /*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
621                 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
622 /*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
623                 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
624 /*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
625                 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
626 /*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
627                 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
628 /*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
629 /*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
630 /*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
631 /*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
632 /*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
633 /*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
634 /*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
635 /*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
636 /*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
637 /*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
638 /*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
639 /*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
640 /*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
641 /*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
642 /*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
643 /*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
644 /*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
645 /*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
646 /*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
647 /*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
648 /*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
649 /*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
650                 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
651 /*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
652                 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
653 };
654
655 /*
656  * TXE Egress Error Info flags
657  */
658 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
659 static struct flag_table egress_err_info_flags[] = {
660 /* 0*/  FLAG_ENTRY0("Reserved", 0ull),
661 /* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
662 /* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
663 /* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
664 /* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
665 /* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
666 /* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
667 /* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
668 /* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
669 /* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
670 /*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
671 /*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
672 /*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
673 /*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
674 /*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
675 /*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
676 /*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
677 /*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
678 /*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
679 /*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
680 /*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
681 /*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
682 };
683
684 /* TXE Egress errors that cause an SPC freeze */
685 #define ALL_TXE_EGRESS_FREEZE_ERR \
686         (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
687         | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
688         | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
689         | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
690         | SEES(TX_LAUNCH_CSR_PARITY) \
691         | SEES(TX_SBRD_CTL_CSR_PARITY) \
692         | SEES(TX_CONFIG_PARITY) \
693         | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
694         | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
695         | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
696         | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
697         | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
698         | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
699         | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
700         | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
701         | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
702         | SEES(TX_CREDIT_RETURN_PARITY))
703
704 /*
705  * TXE Send error flags
706  */
707 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
708 static struct flag_table send_err_status_flags[] = {
709 /* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
710 /* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
711 /* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
712 };
713
714 /*
715  * TXE Send Context Error flags and consequences
716  */
717 static struct flag_table sc_err_status_flags[] = {
718 /* 0*/  FLAG_ENTRY("InconsistentSop",
719                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
720                 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
721 /* 1*/  FLAG_ENTRY("DisallowedPacket",
722                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
723                 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
724 /* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
725                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
726                 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
727 /* 3*/  FLAG_ENTRY("WriteOverflow",
728                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
729                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
730 /* 4*/  FLAG_ENTRY("WriteOutOfBounds",
731                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
732                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
733 /* 5-63 reserved*/
734 };
735
736 /*
737  * RXE Receive Error flags
738  */
739 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
740 static struct flag_table rxe_err_status_flags[] = {
741 /* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
742 /* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
743 /* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
744 /* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
745 /* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
746 /* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
747 /* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
748 /* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
749 /* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
750 /* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
751 /*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
752 /*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
753 /*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
754 /*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
755 /*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
756 /*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
757 /*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
758                 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
759 /*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
760 /*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
761 /*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
762                 RXES(RBUF_BLOCK_LIST_READ_UNC)),
763 /*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
764                 RXES(RBUF_BLOCK_LIST_READ_COR)),
765 /*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
766                 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
767 /*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
768                 RXES(RBUF_CSR_QENT_CNT_PARITY)),
769 /*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
770                 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
771 /*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
772                 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
773 /*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
774 /*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
775 /*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
776                 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
777 /*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
778 /*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
779 /*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
780 /*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
781 /*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
782 /*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
783 /*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
784 /*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
785                 RXES(RBUF_FL_INITDONE_PARITY)),
786 /*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
787                 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
788 /*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
789 /*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
790 /*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
791 /*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
792                 RXES(LOOKUP_DES_PART1_UNC_COR)),
793 /*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
794                 RXES(LOOKUP_DES_PART2_PARITY)),
795 /*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
796 /*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
797 /*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
798 /*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
799 /*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
800 /*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
801 /*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
802 /*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
803 /*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
804 /*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
805 /*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
806 /*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
807 /*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
808 /*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
809 /*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
810 /*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
811 /*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
812 /*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
813 /*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
814 /*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
815 /*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
816 /*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
817 };
818
819 /* RXE errors that will trigger an SPC freeze */
820 #define ALL_RXE_FREEZE_ERR  \
821         (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
822         | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
823         | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
824         | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
825         | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
826         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
827         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
828         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
829         | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
830         | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
831         | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
832         | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
833         | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
834         | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
835         | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
836         | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
837         | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
838         | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
839         | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
840         | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
841         | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
842         | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
843         | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
844         | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
845         | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
846         | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
847         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
848         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
849         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
850         | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
851         | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
852         | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
853         | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
854         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
855         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
856         | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
857         | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
858         | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
859         | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
860         | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
861         | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
862         | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
863         | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
864         | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
865
866 #define RXE_FREEZE_ABORT_MASK \
867         (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
868         RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
869         RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
870
871 /*
872  * DCC Error Flags
873  */
874 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
875 static struct flag_table dcc_err_flags[] = {
876         FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
877         FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
878         FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
879         FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
880         FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
881         FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
882         FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
883         FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
884         FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
885         FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
886         FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
887         FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
888         FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
889         FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
890         FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
891         FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
892         FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
893         FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
894         FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
895         FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
896         FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
897         FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
898         FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
899         FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
900         FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
901         FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
902         FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
903         FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
904         FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
905         FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
906         FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
907         FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
908         FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
909         FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
910         FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
911         FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
912         FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
913         FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
914         FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
915         FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
916         FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
917         FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
918         FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
919         FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
920         FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
921         FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
922 };
923
924 /*
925  * LCB error flags
926  */
927 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
928 static struct flag_table lcb_err_flags[] = {
929 /* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
930 /* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
931 /* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
932 /* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
933                 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
934 /* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
935 /* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
936 /* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
937 /* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
938 /* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
939 /* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
940 /*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
941 /*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
942 /*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
943 /*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
944                 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
945 /*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
946 /*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
947 /*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
948 /*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
949 /*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
950 /*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
951                 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
952 /*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
953 /*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
954 /*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
955 /*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
956 /*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
957 /*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
958 /*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
959                 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
960 /*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
961 /*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
962                 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
963 /*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
964                 LCBE(REDUNDANT_FLIT_PARITY_ERR))
965 };
966
967 /*
968  * DC8051 Error Flags
969  */
970 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
971 static struct flag_table dc8051_err_flags[] = {
972         FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
973         FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
974         FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
975         FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
976         FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
977         FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
978         FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
979         FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
980         FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
981                     D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
982         FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
983 };
984
985 /*
986  * DC8051 Information Error flags
987  *
988  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
989  */
990 static struct flag_table dc8051_info_err_flags[] = {
991         FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
992         FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
993         FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
994         FLAG_ENTRY0("Serdes internal loopback failure",
995                     FAILED_SERDES_INTERNAL_LOOPBACK),
996         FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
997         FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
998         FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
999         FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
1000         FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
1001         FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1002         FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
1003         FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
1004         FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
1005         FLAG_ENTRY0("External Device Request Timeout",
1006                     EXTERNAL_DEVICE_REQ_TIMEOUT),
1007 };
1008
1009 /*
1010  * DC8051 Information Host Information flags
1011  *
1012  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1013  */
1014 static struct flag_table dc8051_info_host_msg_flags[] = {
1015         FLAG_ENTRY0("Host request done", 0x0001),
1016         FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
1017         FLAG_ENTRY0("BC SMA message", 0x0004),
1018         FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1019         FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1020         FLAG_ENTRY0("External device config request", 0x0020),
1021         FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1022         FLAG_ENTRY0("LinkUp achieved", 0x0080),
1023         FLAG_ENTRY0("Link going down", 0x0100),
1024         FLAG_ENTRY0("Link width downgraded", 0x0200),
1025 };
1026
1027 static u32 encoded_size(u32 size);
1028 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1029 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1030 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1031                                u8 *continuous);
1032 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1033                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1034 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1035                                       u8 *remote_tx_rate, u16 *link_widths);
1036 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1037                                     u8 *flag_bits, u16 *link_widths);
1038 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1039                                   u8 *device_rev);
1040 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1041 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1042                             u8 *tx_polarity_inversion,
1043                             u8 *rx_polarity_inversion, u8 *max_rate);
1044 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1045                                 unsigned int context, u64 err_status);
1046 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1047 static void handle_dcc_err(struct hfi1_devdata *dd,
1048                            unsigned int context, u64 err_status);
1049 static void handle_lcb_err(struct hfi1_devdata *dd,
1050                            unsigned int context, u64 err_status);
1051 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1058 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1059 static void set_partition_keys(struct hfi1_pportdata *ppd);
1060 static const char *link_state_name(u32 state);
1061 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1062                                           u32 state);
1063 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1064                            u64 *out_data);
1065 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1066 static int thermal_init(struct hfi1_devdata *dd);
1067
1068 static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1069 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1070                                             int msecs);
1071 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1072                                   int msecs);
1073 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1074 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1075 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1076                                    int msecs);
1077 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
1078                                          int msecs);
1079 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1080 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1081 static void handle_temp_err(struct hfi1_devdata *dd);
1082 static void dc_shutdown(struct hfi1_devdata *dd);
1083 static void dc_start(struct hfi1_devdata *dd);
1084 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1085                            unsigned int *np);
1086 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1087 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1088 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1089 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
1090
1091 /*
1092  * Error interrupt table entry.  This is used as input to the interrupt
1093  * "clear down" routine used for all second tier error interrupt register.
1094  * Second tier interrupt registers have a single bit representing them
1095  * in the top-level CceIntStatus.
1096  */
1097 struct err_reg_info {
1098         u32 status;             /* status CSR offset */
1099         u32 clear;              /* clear CSR offset */
1100         u32 mask;               /* mask CSR offset */
1101         void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1102         const char *desc;
1103 };
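/*
 * Annotation (a rough sketch, not the in-tree handler itself): a table
 * entry is expected to be consumed by the generic "clear down" path
 * roughly as follows - read the status CSR, write the value back to the
 * clear CSR, then dispatch to the per-source handler:
 *
 *	u64 reg = read_csr(dd, eri->status);
 *
 *	write_csr(dd, eri->clear, reg);
 *	if (reg && eri->handler)
 *		eri->handler(dd, source, reg);
 */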
1104
1105 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1106 #define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1107 #define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1108
1109 /*
1110  * Helpers for building HFI and DC error interrupt table entries.  Different
1111  * helpers are needed because of inconsistent register names.
1112  */
1113 #define EE(reg, handler, desc) \
1114         { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1115                 handler, desc }
1116 #define DC_EE1(reg, handler, desc) \
1117         { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1118 #define DC_EE2(reg, handler, desc) \
1119         { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
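/*
 * For illustration, the first misc_errs entry below,
 *
 *	EE(CCE_ERR, handle_cce_err, "CceErr")
 *
 * expands (by suffix pasting) to
 *
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *	  handle_cce_err, "CceErr" }
 *
 * while the DC_EE1/DC_EE2 variants paste the _FLG-style suffixes used by
 * the DC blocks instead.
 */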
1120
1121 /*
1122  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1123  * another register containing more information.
1124  */
1125 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1126 /* 0*/  EE(CCE_ERR,         handle_cce_err,    "CceErr"),
1127 /* 1*/  EE(RCV_ERR,         handle_rxe_err,    "RxeErr"),
1128 /* 2*/  EE(MISC_ERR,        handle_misc_err,   "MiscErr"),
1129 /* 3*/  { 0, 0, 0, NULL }, /* reserved */
1130 /* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1131 /* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1132 /* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1133 /* 7*/  EE(SEND_ERR,        handle_txe_err,    "TxeErr")
1134         /* the rest are reserved */
1135 };
1136
1137 /*
1138  * Index into the Various section of the interrupt sources
1139  * corresponding to the Critical Temperature interrupt.
1140  */
1141 #define TCRIT_INT_SOURCE 4
1142
1143 /*
1144  * SDMA error interrupt entry - refers to another register containing more
1145  * information.
1146  */
1147 static const struct err_reg_info sdma_eng_err =
1148         EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1149
1150 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1151 /* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1152 /* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1153 /* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1154 /* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1155 /* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1156         /* rest are reserved */
1157 };
1158
1159 /*
1160  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1161  * register cannot be derived from the MTU value because 10K is not
1162  * a power of 2. Therefore, we need a constant. Everything else can
1163  * be calculated.
1164  */
1165 #define DCC_CFG_PORT_MTU_CAP_10240 7
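/*
 * Presumed encoding for the other sizes (illustrative, not normative):
 * the power-of-2 MTUs follow the usual IB/OPA progression 256 -> 1,
 * 512 -> 2, ..., 4096 -> 5, 8192 -> 6, i.e. roughly ilog2(mtu) - 7;
 * 10240 falls outside that pattern, hence the fixed value 7 above.
 */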
1166
1167 /*
1168  * Table of the DC grouping of error interrupts.  Each entry refers to
1169  * another register containing more information.
1170  */
1171 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1172 /* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1173 /* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1174 /* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1175 /* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1176         /* the rest are reserved */
1177 };
1178
1179 struct cntr_entry {
1180         /*
1181          * counter name
1182          */
1183         char *name;
1184
1185         /*
1186          * csr to read for name (if applicable)
1187          */
1188         u64 csr;
1189
1190         /*
1191          * offset into dd or ppd to store the counter's value
1192          */
1193         int offset;
1194
1195         /*
1196          * flags
1197          */
1198         u8 flags;
1199
1200         /*
1201          * accessor for stat element, context either dd or ppd
1202          */
1203         u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1204                        int mode, u64 data);
1205 };
1206
1207 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1208 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1209
1210 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1211 { \
1212         name, \
1213         csr, \
1214         offset, \
1215         flags, \
1216         accessor \
1217 }
1218
1219 /* 32bit RXE */
1220 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1221 CNTR_ELEM(#name, \
1222           (counter * 8 + RCV_COUNTER_ARRAY32), \
1223           0, flags | CNTR_32BIT, \
1224           port_access_u32_csr)
1225
1226 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1227 CNTR_ELEM(#name, \
1228           (counter * 8 + RCV_COUNTER_ARRAY32), \
1229           0, flags | CNTR_32BIT, \
1230           dev_access_u32_csr)
1231
1232 /* 64bit RXE */
1233 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1234 CNTR_ELEM(#name, \
1235           (counter * 8 + RCV_COUNTER_ARRAY64), \
1236           0, flags, \
1237           port_access_u64_csr)
1238
1239 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1240 CNTR_ELEM(#name, \
1241           (counter * 8 + RCV_COUNTER_ARRAY64), \
1242           0, flags, \
1243           dev_access_u64_csr)
1244
1245 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1246 #define OVR_ELM(ctx) \
1247 CNTR_ELEM("RcvHdrOvr" #ctx, \
1248           (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1249           0, CNTR_NORMAL, port_access_u64_csr)
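/*
 * Illustrative expansion: OVR_ELM(0) becomes
 *
 *	CNTR_ELEM("RcvHdrOvr0",
 *		  (RCV_HDR_OVFL_CNT + 0 * 0x100),
 *		  0, CNTR_NORMAL, port_access_u64_csr)
 *
 * i.e. one RcvHdrOvfl counter CSR per receive context at a 0x100-byte
 * stride, with OVR_LBL(0) naming the matching C_RCV_HDR_OVF_0 index.
 */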
1250
1251 /* 32bit TXE */
1252 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1253 CNTR_ELEM(#name, \
1254           (counter * 8 + SEND_COUNTER_ARRAY32), \
1255           0, flags | CNTR_32BIT, \
1256           port_access_u32_csr)
1257
1258 /* 64bit TXE */
1259 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1260 CNTR_ELEM(#name, \
1261           (counter * 8 + SEND_COUNTER_ARRAY64), \
1262           0, flags, \
1263           port_access_u64_csr)
1264
1265 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1266 CNTR_ELEM(#name, \
1267           counter * 8 + SEND_COUNTER_ARRAY64, \
1268           0, \
1269           flags, \
1270           dev_access_u64_csr)
1271
1272 /* CCE */
1273 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1274 CNTR_ELEM(#name, \
1275           (counter * 8 + CCE_COUNTER_ARRAY32), \
1276           0, flags | CNTR_32BIT, \
1277           dev_access_u32_csr)
1278
1279 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1280 CNTR_ELEM(#name, \
1281           (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1282           0, flags | CNTR_32BIT, \
1283           dev_access_u32_csr)
1284
1285 /* DC */
1286 #define DC_PERF_CNTR(name, counter, flags) \
1287 CNTR_ELEM(#name, \
1288           counter, \
1289           0, \
1290           flags, \
1291           dev_access_u64_csr)
1292
1293 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1294 CNTR_ELEM(#name, \
1295           counter, \
1296           0, \
1297           flags, \
1298           dc_access_lcb_cntr)
1299
1300 /* ibp counters */
1301 #define SW_IBP_CNTR(name, cntr) \
1302 CNTR_ELEM(#name, \
1303           0, \
1304           0, \
1305           CNTR_SYNTH, \
1306           access_ibp_##cntr)
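/*
 * Illustrative expansion with a hypothetical counter name:
 * SW_IBP_CNTR(Foo, foo) would become
 *
 *	CNTR_ELEM("Foo", 0, 0, CNTR_SYNTH, access_ibp_foo)
 *
 * i.e. a synthetic software counter with no backing CSR, serviced by an
 * access_ibp_* accessor selected via token pasting.
 */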
1307
1308 /**
1309  * hfi1_addr_from_offset - return addr for readq/writeq
1310  * @dd - the dd device
1311  * @offset - the offset of the CSR within bar0
1312  *
1313  * This routine selects the appropriate base address
1314  * based on the indicated offset.
1315  */
1316 static inline void __iomem *hfi1_addr_from_offset(
1317         const struct hfi1_devdata *dd,
1318         u32 offset)
1319 {
1320         if (offset >= dd->base2_start)
1321                 return dd->kregbase2 + (offset - dd->base2_start);
1322         return dd->kregbase1 + offset;
1323 }
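/*
 * Note: the CSR space is mapped in two pieces - offsets below
 * dd->base2_start resolve into kregbase1 and everything at or above it
 * into kregbase2; the RcvArray region in between is not meant to go
 * through this path for writes (see the guard in write_csr() below).
 */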
1324
1325 /**
1326  * read_csr - read CSR at the indicated offset
1327  * @dd - the dd device
1328  * @offset - the offset of the CSR within bar0
1329  *
1330  * Return: the value read or all FF's if there
1331  * is no mapping
1332  */
1333 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1334 {
1335         if (dd->flags & HFI1_PRESENT)
1336                 return readq(hfi1_addr_from_offset(dd, offset));
1337         return -1;
1338 }
1339
1340 /**
1341  * write_csr - write CSR at the indicated offset
1342  * @dd - the dd device
1343  * @offset - the offset of the CSR within bar0
1344  * @value - value to write
1345  */
1346 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1347 {
1348         if (dd->flags & HFI1_PRESENT) {
1349                 void __iomem *base = hfi1_addr_from_offset(dd, offset);
1350
1351                 /* avoid write to RcvArray */
1352                 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1353                         return;
1354                 writeq(value, base);
1355         }
1356 }
1357
1358 /**
1359  * get_csr_addr - return the iomem address for offset
1360  * @dd - the dd device
1361  * @offset - the offset of the CSR within bar0
1362  *
1363  * Return: The iomem address to use in subsequent
1364  * writeq/readq operations.
1365  */
1366 void __iomem *get_csr_addr(
1367         const struct hfi1_devdata *dd,
1368         u32 offset)
1369 {
1370         if (dd->flags & HFI1_PRESENT)
1371                 return hfi1_addr_from_offset(dd, offset);
1372         return NULL;
1373 }
1374
1375 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1376                                  int mode, u64 value)
1377 {
1378         u64 ret;
1379
1380         if (mode == CNTR_MODE_R) {
1381                 ret = read_csr(dd, csr);
1382         } else if (mode == CNTR_MODE_W) {
1383                 write_csr(dd, csr, value);
1384                 ret = value;
1385         } else {
1386                 dd_dev_err(dd, "Invalid cntr register access mode");
1387                 return 0;
1388         }
1389
1390         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1391         return ret;
1392 }
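/*
 * Minimal usage sketch: the accessors below read a counter CSR with
 * read_write_csr(dd, csr, CNTR_MODE_R, 0) and write one (typically to
 * zero it) with read_write_csr(dd, csr, CNTR_MODE_W, value); any other
 * mode is rejected with an error message and a return value of 0.
 */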
1393
1394 /* Dev Access */
1395 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1396                               void *context, int vl, int mode, u64 data)
1397 {
1398         struct hfi1_devdata *dd = context;
1399         u64 csr = entry->csr;
1400
1401         if (entry->flags & CNTR_SDMA) {
1402                 if (vl == CNTR_INVALID_VL)
1403                         return 0;
1404                 csr += 0x100 * vl;
1405         } else {
1406                 if (vl != CNTR_INVALID_VL)
1407                         return 0;
1408         }
1409         return read_write_csr(dd, csr, mode, data);
1410 }
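/*
 * Note: for CNTR_SDMA elements the "vl" argument appears to carry the
 * SDMA engine index rather than a VL, with each engine's copy of the
 * counter CSR sitting at a 0x100-byte stride from the base offset.
 */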
1411
1412 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1413                               void *context, int idx, int mode, u64 data)
1414 {
1415         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1416
1417         if (dd->per_sdma && idx < dd->num_sdma)
1418                 return dd->per_sdma[idx].err_cnt;
1419         return 0;
1420 }
1421
1422 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1423                               void *context, int idx, int mode, u64 data)
1424 {
1425         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1426
1427         if (dd->per_sdma && idx < dd->num_sdma)
1428                 return dd->per_sdma[idx].sdma_int_cnt;
1429         return 0;
1430 }
1431
1432 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1433                                    void *context, int idx, int mode, u64 data)
1434 {
1435         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1436
1437         if (dd->per_sdma && idx < dd->num_sdma)
1438                 return dd->per_sdma[idx].idle_int_cnt;
1439         return 0;
1440 }
1441
1442 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1443                                        void *context, int idx, int mode,
1444                                        u64 data)
1445 {
1446         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1447
1448         if (dd->per_sdma && idx < dd->num_sdma)
1449                 return dd->per_sdma[idx].progress_int_cnt;
1450         return 0;
1451 }
1452
1453 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1454                               int vl, int mode, u64 data)
1455 {
1456         struct hfi1_devdata *dd = context;
1457
1458         u64 val = 0;
1459         u64 csr = entry->csr;
1460
1461         if (entry->flags & CNTR_VL) {
1462                 if (vl == CNTR_INVALID_VL)
1463                         return 0;
1464                 csr += 8 * vl;
1465         } else {
1466                 if (vl != CNTR_INVALID_VL)
1467                         return 0;
1468         }
1469
1470         val = read_write_csr(dd, csr, mode, data);
1471         return val;
1472 }
1473
1474 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1475                               int vl, int mode, u64 data)
1476 {
1477         struct hfi1_devdata *dd = context;
1478         u32 csr = entry->csr;
1479         int ret = 0;
1480
1481         if (vl != CNTR_INVALID_VL)
1482                 return 0;
1483         if (mode == CNTR_MODE_R)
1484                 ret = read_lcb_csr(dd, csr, &data);
1485         else if (mode == CNTR_MODE_W)
1486                 ret = write_lcb_csr(dd, csr, data);
1487
1488         if (ret) {
1489                 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1490                 return 0;
1491         }
1492
1493         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1494         return data;
1495 }
1496
1497 /* Port Access */
1498 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1499                                int vl, int mode, u64 data)
1500 {
1501         struct hfi1_pportdata *ppd = context;
1502
1503         if (vl != CNTR_INVALID_VL)
1504                 return 0;
1505         return read_write_csr(ppd->dd, entry->csr, mode, data);
1506 }
1507
1508 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1509                                void *context, int vl, int mode, u64 data)
1510 {
1511         struct hfi1_pportdata *ppd = context;
1512         u64 val;
1513         u64 csr = entry->csr;
1514
1515         if (entry->flags & CNTR_VL) {
1516                 if (vl == CNTR_INVALID_VL)
1517                         return 0;
1518                 csr += 8 * vl;
1519         } else {
1520                 if (vl != CNTR_INVALID_VL)
1521                         return 0;
1522         }
1523         val = read_write_csr(ppd->dd, csr, mode, data);
1524         return val;
1525 }
1526
1527 /* Software defined */
1528 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1529                                 u64 data)
1530 {
1531         u64 ret;
1532
1533         if (mode == CNTR_MODE_R) {
1534                 ret = *cntr;
1535         } else if (mode == CNTR_MODE_W) {
1536                 *cntr = data;
1537                 ret = data;
1538         } else {
1539                 dd_dev_err(dd, "Invalid cntr sw access mode");
1540                 return 0;
1541         }
1542
1543         hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1544
1545         return ret;
1546 }
1547
1548 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1549                                  int vl, int mode, u64 data)
1550 {
1551         struct hfi1_pportdata *ppd = context;
1552
1553         if (vl != CNTR_INVALID_VL)
1554                 return 0;
1555         return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1556 }
1557
1558 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1559                                  int vl, int mode, u64 data)
1560 {
1561         struct hfi1_pportdata *ppd = context;
1562
1563         if (vl != CNTR_INVALID_VL)
1564                 return 0;
1565         return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1566 }
1567
1568 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1569                                        void *context, int vl, int mode,
1570                                        u64 data)
1571 {
1572         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1573
1574         if (vl != CNTR_INVALID_VL)
1575                 return 0;
1576         return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1577 }
1578
1579 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1580                                    void *context, int vl, int mode, u64 data)
1581 {
1582         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1583         u64 zero = 0;
1584         u64 *counter;
1585
1586         if (vl == CNTR_INVALID_VL)
1587                 counter = &ppd->port_xmit_discards;
1588         else if (vl >= 0 && vl < C_VL_COUNT)
1589                 counter = &ppd->port_xmit_discards_vl[vl];
1590         else
1591                 counter = &zero;
1592
1593         return read_write_sw(ppd->dd, counter, mode, data);
1594 }
1595
1596 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1597                                        void *context, int vl, int mode,
1598                                        u64 data)
1599 {
1600         struct hfi1_pportdata *ppd = context;
1601
1602         if (vl != CNTR_INVALID_VL)
1603                 return 0;
1604
1605         return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1606                              mode, data);
1607 }
1608
1609 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1610                                       void *context, int vl, int mode, u64 data)
1611 {
1612         struct hfi1_pportdata *ppd = context;
1613
1614         if (vl != CNTR_INVALID_VL)
1615                 return 0;
1616
1617         return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1618                              mode, data);
1619 }
1620
1621 u64 get_all_cpu_total(u64 __percpu *cntr)
1622 {
1623         int cpu;
1624         u64 counter = 0;
1625
1626         for_each_possible_cpu(cpu)
1627                 counter += *per_cpu_ptr(cntr, cpu);
1628         return counter;
1629 }
1630
1631 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1632                           u64 __percpu *cntr,
1633                           int vl, int mode, u64 data)
1634 {
1635         u64 ret = 0;
1636
1637         if (vl != CNTR_INVALID_VL)
1638                 return 0;
1639
1640         if (mode == CNTR_MODE_R) {
1641                 ret = get_all_cpu_total(cntr) - *z_val;
1642         } else if (mode == CNTR_MODE_W) {
1643                 /* A write can only zero the counter */
1644                 if (data == 0)
1645                         *z_val = get_all_cpu_total(cntr);
1646                 else
1647                         dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1648         } else {
1649                 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1650                 return 0;
1651         }
1652
1653         return ret;
1654 }
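/*
 * Worked example (illustrative): if the per-CPU totals currently sum to
 * 1000 and a write of 0 arrives, *z_val is latched at 1000; a later read
 * after the totals have grown to 1250 then reports 1250 - 1000 = 250, so
 * the counter appears zeroed without touching the per-CPU data itself.
 */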
1655
1656 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1657                               void *context, int vl, int mode, u64 data)
1658 {
1659         struct hfi1_devdata *dd = context;
1660
1661         return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1662                               mode, data);
1663 }
1664
1665 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1666                                    void *context, int vl, int mode, u64 data)
1667 {
1668         struct hfi1_devdata *dd = context;
1669
1670         return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1671                               mode, data);
1672 }
1673
1674 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1675                               void *context, int vl, int mode, u64 data)
1676 {
1677         struct hfi1_devdata *dd = context;
1678
1679         return dd->verbs_dev.n_piowait;
1680 }
1681
1682 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1683                                void *context, int vl, int mode, u64 data)
1684 {
1685         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1686
1687         return dd->verbs_dev.n_piodrain;
1688 }
1689
1690 static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
1691                                    void *context, int vl, int mode, u64 data)
1692 {
1693         struct hfi1_devdata *dd = context;
1694
1695         return dd->ctx0_seq_drop;
1696 }
1697
1698 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1699                               void *context, int vl, int mode, u64 data)
1700 {
1701         struct hfi1_devdata *dd = context;
1702
1703         return dd->verbs_dev.n_txwait;
1704 }
1705
1706 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1707                                void *context, int vl, int mode, u64 data)
1708 {
1709         struct hfi1_devdata *dd = context;
1710
1711         return dd->verbs_dev.n_kmem_wait;
1712 }
1713
1714 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1715                                    void *context, int vl, int mode, u64 data)
1716 {
1717         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1718
1719         return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1720                               mode, data);
1721 }
1722
1723 /* Software counters for the error status bits within MISC_ERR_STATUS */
1724 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1725                                              void *context, int vl, int mode,
1726                                              u64 data)
1727 {
1728         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1729
1730         return dd->misc_err_status_cnt[12];
1731 }
1732
1733 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1734                                           void *context, int vl, int mode,
1735                                           u64 data)
1736 {
1737         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1738
1739         return dd->misc_err_status_cnt[11];
1740 }
1741
1742 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1743                                                void *context, int vl, int mode,
1744                                                u64 data)
1745 {
1746         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1747
1748         return dd->misc_err_status_cnt[10];
1749 }
1750
1751 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1752                                                  void *context, int vl,
1753                                                  int mode, u64 data)
1754 {
1755         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1756
1757         return dd->misc_err_status_cnt[9];
1758 }
1759
1760 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1761                                            void *context, int vl, int mode,
1762                                            u64 data)
1763 {
1764         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1765
1766         return dd->misc_err_status_cnt[8];
1767 }
1768
1769 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1770                                 const struct cntr_entry *entry,
1771                                 void *context, int vl, int mode, u64 data)
1772 {
1773         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1774
1775         return dd->misc_err_status_cnt[7];
1776 }
1777
1778 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1779                                                 void *context, int vl,
1780                                                 int mode, u64 data)
1781 {
1782         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1783
1784         return dd->misc_err_status_cnt[6];
1785 }
1786
1787 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1788                                               void *context, int vl, int mode,
1789                                               u64 data)
1790 {
1791         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1792
1793         return dd->misc_err_status_cnt[5];
1794 }
1795
1796 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1797                                             void *context, int vl, int mode,
1798                                             u64 data)
1799 {
1800         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1801
1802         return dd->misc_err_status_cnt[4];
1803 }
1804
1805 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1806                                                  void *context, int vl,
1807                                                  int mode, u64 data)
1808 {
1809         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1810
1811         return dd->misc_err_status_cnt[3];
1812 }
1813
1814 static u64 access_misc_csr_write_bad_addr_err_cnt(
1815                                 const struct cntr_entry *entry,
1816                                 void *context, int vl, int mode, u64 data)
1817 {
1818         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1819
1820         return dd->misc_err_status_cnt[2];
1821 }
1822
1823 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1824                                                  void *context, int vl,
1825                                                  int mode, u64 data)
1826 {
1827         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1828
1829         return dd->misc_err_status_cnt[1];
1830 }
1831
1832 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1833                                           void *context, int vl, int mode,
1834                                           u64 data)
1835 {
1836         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1837
1838         return dd->misc_err_status_cnt[0];
1839 }
1840
1841 /*
1842  * Software counter for the aggregate of
1843  * individual CceErrStatus counters
1844  */
1845 static u64 access_sw_cce_err_status_aggregated_cnt(
1846                                 const struct cntr_entry *entry,
1847                                 void *context, int vl, int mode, u64 data)
1848 {
1849         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1850
1851         return dd->sw_cce_err_status_aggregate;
1852 }
1853
1854 /*
1855  * Software counters corresponding to each of the
1856  * error status bits within CceErrStatus
1857  */
1858 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1859                                               void *context, int vl, int mode,
1860                                               u64 data)
1861 {
1862         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1863
1864         return dd->cce_err_status_cnt[40];
1865 }
1866
1867 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1868                                           void *context, int vl, int mode,
1869                                           u64 data)
1870 {
1871         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1872
1873         return dd->cce_err_status_cnt[39];
1874 }
1875
1876 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1877                                           void *context, int vl, int mode,
1878                                           u64 data)
1879 {
1880         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1881
1882         return dd->cce_err_status_cnt[38];
1883 }
1884
1885 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1886                                              void *context, int vl, int mode,
1887                                              u64 data)
1888 {
1889         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1890
1891         return dd->cce_err_status_cnt[37];
1892 }
1893
1894 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1895                                              void *context, int vl, int mode,
1896                                              u64 data)
1897 {
1898         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1899
1900         return dd->cce_err_status_cnt[36];
1901 }
1902
1903 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1904                                 const struct cntr_entry *entry,
1905                                 void *context, int vl, int mode, u64 data)
1906 {
1907         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1908
1909         return dd->cce_err_status_cnt[35];
1910 }
1911
1912 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1913                                 const struct cntr_entry *entry,
1914                                 void *context, int vl, int mode, u64 data)
1915 {
1916         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1917
1918         return dd->cce_err_status_cnt[34];
1919 }
1920
1921 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1922                                                  void *context, int vl,
1923                                                  int mode, u64 data)
1924 {
1925         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1926
1927         return dd->cce_err_status_cnt[33];
1928 }
1929
1930 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1931                                                 void *context, int vl, int mode,
1932                                                 u64 data)
1933 {
1934         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1935
1936         return dd->cce_err_status_cnt[32];
1937 }
1938
1939 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1940                                    void *context, int vl, int mode, u64 data)
1941 {
1942         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1943
1944         return dd->cce_err_status_cnt[31];
1945 }
1946
1947 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1948                                                void *context, int vl, int mode,
1949                                                u64 data)
1950 {
1951         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1952
1953         return dd->cce_err_status_cnt[30];
1954 }
1955
1956 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1957                                               void *context, int vl, int mode,
1958                                               u64 data)
1959 {
1960         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1961
1962         return dd->cce_err_status_cnt[29];
1963 }
1964
1965 static u64 access_pcic_transmit_back_parity_err_cnt(
1966                                 const struct cntr_entry *entry,
1967                                 void *context, int vl, int mode, u64 data)
1968 {
1969         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1970
1971         return dd->cce_err_status_cnt[28];
1972 }
1973
1974 static u64 access_pcic_transmit_front_parity_err_cnt(
1975                                 const struct cntr_entry *entry,
1976                                 void *context, int vl, int mode, u64 data)
1977 {
1978         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1979
1980         return dd->cce_err_status_cnt[27];
1981 }
1982
1983 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1984                                              void *context, int vl, int mode,
1985                                              u64 data)
1986 {
1987         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1988
1989         return dd->cce_err_status_cnt[26];
1990 }
1991
1992 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1993                                             void *context, int vl, int mode,
1994                                             u64 data)
1995 {
1996         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1997
1998         return dd->cce_err_status_cnt[25];
1999 }
2000
2001 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
2002                                               void *context, int vl, int mode,
2003                                               u64 data)
2004 {
2005         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2006
2007         return dd->cce_err_status_cnt[24];
2008 }
2009
2010 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2011                                              void *context, int vl, int mode,
2012                                              u64 data)
2013 {
2014         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2015
2016         return dd->cce_err_status_cnt[23];
2017 }
2018
2019 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2020                                                  void *context, int vl,
2021                                                  int mode, u64 data)
2022 {
2023         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2024
2025         return dd->cce_err_status_cnt[22];
2026 }
2027
2028 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2029                                          void *context, int vl, int mode,
2030                                          u64 data)
2031 {
2032         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2033
2034         return dd->cce_err_status_cnt[21];
2035 }
2036
2037 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2038                                 const struct cntr_entry *entry,
2039                                 void *context, int vl, int mode, u64 data)
2040 {
2041         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2042
2043         return dd->cce_err_status_cnt[20];
2044 }
2045
2046 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2047                                                  void *context, int vl,
2048                                                  int mode, u64 data)
2049 {
2050         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2051
2052         return dd->cce_err_status_cnt[19];
2053 }
2054
2055 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2056                                              void *context, int vl, int mode,
2057                                              u64 data)
2058 {
2059         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2060
2061         return dd->cce_err_status_cnt[18];
2062 }
2063
2064 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2065                                             void *context, int vl, int mode,
2066                                             u64 data)
2067 {
2068         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2069
2070         return dd->cce_err_status_cnt[17];
2071 }
2072
2073 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2074                                               void *context, int vl, int mode,
2075                                               u64 data)
2076 {
2077         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2078
2079         return dd->cce_err_status_cnt[16];
2080 }
2081
2082 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2083                                              void *context, int vl, int mode,
2084                                              u64 data)
2085 {
2086         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2087
2088         return dd->cce_err_status_cnt[15];
2089 }
2090
2091 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2092                                                  void *context, int vl,
2093                                                  int mode, u64 data)
2094 {
2095         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2096
2097         return dd->cce_err_status_cnt[14];
2098 }
2099
2100 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2101                                              void *context, int vl, int mode,
2102                                              u64 data)
2103 {
2104         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2105
2106         return dd->cce_err_status_cnt[13];
2107 }
2108
2109 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2110                                 const struct cntr_entry *entry,
2111                                 void *context, int vl, int mode, u64 data)
2112 {
2113         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2114
2115         return dd->cce_err_status_cnt[12];
2116 }
2117
2118 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2119                                 const struct cntr_entry *entry,
2120                                 void *context, int vl, int mode, u64 data)
2121 {
2122         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2123
2124         return dd->cce_err_status_cnt[11];
2125 }
2126
2127 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2128                                 const struct cntr_entry *entry,
2129                                 void *context, int vl, int mode, u64 data)
2130 {
2131         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2132
2133         return dd->cce_err_status_cnt[10];
2134 }
2135
2136 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2137                                 const struct cntr_entry *entry,
2138                                 void *context, int vl, int mode, u64 data)
2139 {
2140         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2141
2142         return dd->cce_err_status_cnt[9];
2143 }
2144
2145 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2146                                 const struct cntr_entry *entry,
2147                                 void *context, int vl, int mode, u64 data)
2148 {
2149         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2150
2151         return dd->cce_err_status_cnt[8];
2152 }
2153
2154 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2155                                                  void *context, int vl,
2156                                                  int mode, u64 data)
2157 {
2158         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2159
2160         return dd->cce_err_status_cnt[7];
2161 }
2162
2163 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2164                                 const struct cntr_entry *entry,
2165                                 void *context, int vl, int mode, u64 data)
2166 {
2167         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2168
2169         return dd->cce_err_status_cnt[6];
2170 }
2171
2172 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2173                                                void *context, int vl, int mode,
2174                                                u64 data)
2175 {
2176         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2177
2178         return dd->cce_err_status_cnt[5];
2179 }
2180
2181 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2182                                           void *context, int vl, int mode,
2183                                           u64 data)
2184 {
2185         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2186
2187         return dd->cce_err_status_cnt[4];
2188 }
2189
2190 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2191                                 const struct cntr_entry *entry,
2192                                 void *context, int vl, int mode, u64 data)
2193 {
2194         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2195
2196         return dd->cce_err_status_cnt[3];
2197 }
2198
2199 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2200                                                  void *context, int vl,
2201                                                  int mode, u64 data)
2202 {
2203         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2204
2205         return dd->cce_err_status_cnt[2];
2206 }
2207
2208 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2209                                                 void *context, int vl,
2210                                                 int mode, u64 data)
2211 {
2212         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2213
2214         return dd->cce_err_status_cnt[1];
2215 }
2216
2217 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2218                                          void *context, int vl, int mode,
2219                                          u64 data)
2220 {
2221         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2222
2223         return dd->cce_err_status_cnt[0];
2224 }
2225
2226 /*
2227  * Software counters corresponding to each of the
2228  * error status bits within RcvErrStatus
2229  */
2230 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2231                                         void *context, int vl, int mode,
2232                                         u64 data)
2233 {
2234         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2235
2236         return dd->rcv_err_status_cnt[63];
2237 }
2238
2239 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2240                                                 void *context, int vl,
2241                                                 int mode, u64 data)
2242 {
2243         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2244
2245         return dd->rcv_err_status_cnt[62];
2246 }
2247
2248 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2249                                                void *context, int vl, int mode,
2250                                                u64 data)
2251 {
2252         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2253
2254         return dd->rcv_err_status_cnt[61];
2255 }
2256
2257 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2258                                          void *context, int vl, int mode,
2259                                          u64 data)
2260 {
2261         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2262
2263         return dd->rcv_err_status_cnt[60];
2264 }
2265
2266 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2267                                                  void *context, int vl,
2268                                                  int mode, u64 data)
2269 {
2270         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2271
2272         return dd->rcv_err_status_cnt[59];
2273 }
2274
2275 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2276                                                  void *context, int vl,
2277                                                  int mode, u64 data)
2278 {
2279         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2280
2281         return dd->rcv_err_status_cnt[58];
2282 }
2283
2284 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2285                                             void *context, int vl, int mode,
2286                                             u64 data)
2287 {
2288         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2289
2290         return dd->rcv_err_status_cnt[57];
2291 }
2292
2293 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2294                                            void *context, int vl, int mode,
2295                                            u64 data)
2296 {
2297         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2298
2299         return dd->rcv_err_status_cnt[56];
2300 }
2301
2302 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2303                                            void *context, int vl, int mode,
2304                                            u64 data)
2305 {
2306         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2307
2308         return dd->rcv_err_status_cnt[55];
2309 }
2310
2311 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2312                                 const struct cntr_entry *entry,
2313                                 void *context, int vl, int mode, u64 data)
2314 {
2315         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2316
2317         return dd->rcv_err_status_cnt[54];
2318 }
2319
2320 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2321                                 const struct cntr_entry *entry,
2322                                 void *context, int vl, int mode, u64 data)
2323 {
2324         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2325
2326         return dd->rcv_err_status_cnt[53];
2327 }
2328
2329 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2330                                                  void *context, int vl,
2331                                                  int mode, u64 data)
2332 {
2333         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2334
2335         return dd->rcv_err_status_cnt[52];
2336 }
2337
2338 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2339                                                  void *context, int vl,
2340                                                  int mode, u64 data)
2341 {
2342         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2343
2344         return dd->rcv_err_status_cnt[51];
2345 }
2346
2347 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2348                                                  void *context, int vl,
2349                                                  int mode, u64 data)
2350 {
2351         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2352
2353         return dd->rcv_err_status_cnt[50];
2354 }
2355
2356 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2357                                                  void *context, int vl,
2358                                                  int mode, u64 data)
2359 {
2360         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2361
2362         return dd->rcv_err_status_cnt[49];
2363 }
2364
2365 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2366                                                  void *context, int vl,
2367                                                  int mode, u64 data)
2368 {
2369         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2370
2371         return dd->rcv_err_status_cnt[48];
2372 }
2373
2374 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2375                                                  void *context, int vl,
2376                                                  int mode, u64 data)
2377 {
2378         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2379
2380         return dd->rcv_err_status_cnt[47];
2381 }
2382
2383 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2384                                          void *context, int vl, int mode,
2385                                          u64 data)
2386 {
2387         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2388
2389         return dd->rcv_err_status_cnt[46];
2390 }
2391
2392 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2393                                 const struct cntr_entry *entry,
2394                                 void *context, int vl, int mode, u64 data)
2395 {
2396         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2397
2398         return dd->rcv_err_status_cnt[45];
2399 }
2400
2401 static u64 access_rx_lookup_csr_parity_err_cnt(
2402                                 const struct cntr_entry *entry,
2403                                 void *context, int vl, int mode, u64 data)
2404 {
2405         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2406
2407         return dd->rcv_err_status_cnt[44];
2408 }
2409
2410 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2411                                 const struct cntr_entry *entry,
2412                                 void *context, int vl, int mode, u64 data)
2413 {
2414         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2415
2416         return dd->rcv_err_status_cnt[43];
2417 }
2418
2419 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2420                                 const struct cntr_entry *entry,
2421                                 void *context, int vl, int mode, u64 data)
2422 {
2423         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2424
2425         return dd->rcv_err_status_cnt[42];
2426 }
2427
2428 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2429                                 const struct cntr_entry *entry,
2430                                 void *context, int vl, int mode, u64 data)
2431 {
2432         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2433
2434         return dd->rcv_err_status_cnt[41];
2435 }
2436
2437 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2438                                 const struct cntr_entry *entry,
2439                                 void *context, int vl, int mode, u64 data)
2440 {
2441         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2442
2443         return dd->rcv_err_status_cnt[40];
2444 }
2445
2446 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2447                                 const struct cntr_entry *entry,
2448                                 void *context, int vl, int mode, u64 data)
2449 {
2450         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2451
2452         return dd->rcv_err_status_cnt[39];
2453 }
2454
2455 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2456                                 const struct cntr_entry *entry,
2457                                 void *context, int vl, int mode, u64 data)
2458 {
2459         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2460
2461         return dd->rcv_err_status_cnt[38];
2462 }
2463
2464 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2465                                 const struct cntr_entry *entry,
2466                                 void *context, int vl, int mode, u64 data)
2467 {
2468         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2469
2470         return dd->rcv_err_status_cnt[37];
2471 }
2472
2473 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2474                                 const struct cntr_entry *entry,
2475                                 void *context, int vl, int mode, u64 data)
2476 {
2477         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2478
2479         return dd->rcv_err_status_cnt[36];
2480 }
2481
2482 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2483                                 const struct cntr_entry *entry,
2484                                 void *context, int vl, int mode, u64 data)
2485 {
2486         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2487
2488         return dd->rcv_err_status_cnt[35];
2489 }
2490
2491 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2492                                 const struct cntr_entry *entry,
2493                                 void *context, int vl, int mode, u64 data)
2494 {
2495         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2496
2497         return dd->rcv_err_status_cnt[34];
2498 }
2499
2500 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2501                                 const struct cntr_entry *entry,
2502                                 void *context, int vl, int mode, u64 data)
2503 {
2504         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2505
2506         return dd->rcv_err_status_cnt[33];
2507 }
2508
2509 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2510                                         void *context, int vl, int mode,
2511                                         u64 data)
2512 {
2513         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2514
2515         return dd->rcv_err_status_cnt[32];
2516 }
2517
2518 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2519                                        void *context, int vl, int mode,
2520                                        u64 data)
2521 {
2522         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2523
2524         return dd->rcv_err_status_cnt[31];
2525 }
2526
2527 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2528                                           void *context, int vl, int mode,
2529                                           u64 data)
2530 {
2531         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2532
2533         return dd->rcv_err_status_cnt[30];
2534 }
2535
2536 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2537                                              void *context, int vl, int mode,
2538                                              u64 data)
2539 {
2540         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2541
2542         return dd->rcv_err_status_cnt[29];
2543 }
2544
2545 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2546                                                  void *context, int vl,
2547                                                  int mode, u64 data)
2548 {
2549         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2550
2551         return dd->rcv_err_status_cnt[28];
2552 }
2553
2554 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2555                                 const struct cntr_entry *entry,
2556                                 void *context, int vl, int mode, u64 data)
2557 {
2558         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2559
2560         return dd->rcv_err_status_cnt[27];
2561 }
2562
2563 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2564                                 const struct cntr_entry *entry,
2565                                 void *context, int vl, int mode, u64 data)
2566 {
2567         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2568
2569         return dd->rcv_err_status_cnt[26];
2570 }
2571
2572 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2573                                 const struct cntr_entry *entry,
2574                                 void *context, int vl, int mode, u64 data)
2575 {
2576         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2577
2578         return dd->rcv_err_status_cnt[25];
2579 }
2580
2581 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2582                                 const struct cntr_entry *entry,
2583                                 void *context, int vl, int mode, u64 data)
2584 {
2585         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2586
2587         return dd->rcv_err_status_cnt[24];
2588 }
2589
2590 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2591                                 const struct cntr_entry *entry,
2592                                 void *context, int vl, int mode, u64 data)
2593 {
2594         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2595
2596         return dd->rcv_err_status_cnt[23];
2597 }
2598
2599 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2600                                 const struct cntr_entry *entry,
2601                                 void *context, int vl, int mode, u64 data)
2602 {
2603         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2604
2605         return dd->rcv_err_status_cnt[22];
2606 }
2607
2608 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2609                                 const struct cntr_entry *entry,
2610                                 void *context, int vl, int mode, u64 data)
2611 {
2612         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2613
2614         return dd->rcv_err_status_cnt[21];
2615 }
2616
2617 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2618                                 const struct cntr_entry *entry,
2619                                 void *context, int vl, int mode, u64 data)
2620 {
2621         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2622
2623         return dd->rcv_err_status_cnt[20];
2624 }
2625
2626 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2627                                 const struct cntr_entry *entry,
2628                                 void *context, int vl, int mode, u64 data)
2629 {
2630         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2631
2632         return dd->rcv_err_status_cnt[19];
2633 }
2634
2635 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2636                                                  void *context, int vl,
2637                                                  int mode, u64 data)
2638 {
2639         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2640
2641         return dd->rcv_err_status_cnt[18];
2642 }
2643
2644 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2645                                                  void *context, int vl,
2646                                                  int mode, u64 data)
2647 {
2648         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2649
2650         return dd->rcv_err_status_cnt[17];
2651 }
2652
2653 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2654                                 const struct cntr_entry *entry,
2655                                 void *context, int vl, int mode, u64 data)
2656 {
2657         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2658
2659         return dd->rcv_err_status_cnt[16];
2660 }
2661
2662 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2663                                 const struct cntr_entry *entry,
2664                                 void *context, int vl, int mode, u64 data)
2665 {
2666         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2667
2668         return dd->rcv_err_status_cnt[15];
2669 }
2670
2671 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2672                                                 void *context, int vl,
2673                                                 int mode, u64 data)
2674 {
2675         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2676
2677         return dd->rcv_err_status_cnt[14];
2678 }
2679
2680 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2681                                                 void *context, int vl,
2682                                                 int mode, u64 data)
2683 {
2684         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2685
2686         return dd->rcv_err_status_cnt[13];
2687 }
2688
2689 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2690                                               void *context, int vl, int mode,
2691                                               u64 data)
2692 {
2693         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2694
2695         return dd->rcv_err_status_cnt[12];
2696 }
2697
2698 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2699                                           void *context, int vl, int mode,
2700                                           u64 data)
2701 {
2702         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2703
2704         return dd->rcv_err_status_cnt[11];
2705 }
2706
2707 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2708                                           void *context, int vl, int mode,
2709                                           u64 data)
2710 {
2711         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2712
2713         return dd->rcv_err_status_cnt[10];
2714 }
2715
2716 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2717                                                void *context, int vl, int mode,
2718                                                u64 data)
2719 {
2720         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2721
2722         return dd->rcv_err_status_cnt[9];
2723 }
2724
2725 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2726                                             void *context, int vl, int mode,
2727                                             u64 data)
2728 {
2729         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2730
2731         return dd->rcv_err_status_cnt[8];
2732 }
2733
2734 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2735                                 const struct cntr_entry *entry,
2736                                 void *context, int vl, int mode, u64 data)
2737 {
2738         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2739
2740         return dd->rcv_err_status_cnt[7];
2741 }
2742
2743 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2744                                 const struct cntr_entry *entry,
2745                                 void *context, int vl, int mode, u64 data)
2746 {
2747         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2748
2749         return dd->rcv_err_status_cnt[6];
2750 }
2751
2752 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2753                                           void *context, int vl, int mode,
2754                                           u64 data)
2755 {
2756         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2757
2758         return dd->rcv_err_status_cnt[5];
2759 }
2760
2761 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2762                                           void *context, int vl, int mode,
2763                                           u64 data)
2764 {
2765         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2766
2767         return dd->rcv_err_status_cnt[4];
2768 }
2769
2770 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2771                                          void *context, int vl, int mode,
2772                                          u64 data)
2773 {
2774         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2775
2776         return dd->rcv_err_status_cnt[3];
2777 }
2778
2779 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2780                                          void *context, int vl, int mode,
2781                                          u64 data)
2782 {
2783         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2784
2785         return dd->rcv_err_status_cnt[2];
2786 }
2787
2788 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2789                                             void *context, int vl, int mode,
2790                                             u64 data)
2791 {
2792         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2793
2794         return dd->rcv_err_status_cnt[1];
2795 }
2796
2797 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2798                                          void *context, int vl, int mode,
2799                                          u64 data)
2800 {
2801         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2802
2803         return dd->rcv_err_status_cnt[0];
2804 }
2805
2806 /*
2807  * Software counters corresponding to each of the
2808  * error status bits within SendPioErrStatus
2809  */
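
/*
 * Each accessor in this family ignores its entry, vl, mode and data
 * arguments and simply returns the software count kept for one fixed
 * bit position in dd->send_pio_err_status_cnt[], mirroring the
 * RcvErrStatus accessors above.
 */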
2810 static u64 access_pio_pec_sop_head_parity_err_cnt(
2811                                 const struct cntr_entry *entry,
2812                                 void *context, int vl, int mode, u64 data)
2813 {
2814         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2815
2816         return dd->send_pio_err_status_cnt[35];
2817 }
2818
2819 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2820                                 const struct cntr_entry *entry,
2821                                 void *context, int vl, int mode, u64 data)
2822 {
2823         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2824
2825         return dd->send_pio_err_status_cnt[34];
2826 }
2827
2828 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2829                                 const struct cntr_entry *entry,
2830                                 void *context, int vl, int mode, u64 data)
2831 {
2832         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2833
2834         return dd->send_pio_err_status_cnt[33];
2835 }
2836
2837 static u64 access_pio_current_free_cnt_parity_err_cnt(
2838                                 const struct cntr_entry *entry,
2839                                 void *context, int vl, int mode, u64 data)
2840 {
2841         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2842
2843         return dd->send_pio_err_status_cnt[32];
2844 }
2845
2846 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2847                                           void *context, int vl, int mode,
2848                                           u64 data)
2849 {
2850         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2851
2852         return dd->send_pio_err_status_cnt[31];
2853 }
2854
2855 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2856                                           void *context, int vl, int mode,
2857                                           u64 data)
2858 {
2859         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2860
2861         return dd->send_pio_err_status_cnt[30];
2862 }
2863
2864 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2865                                            void *context, int vl, int mode,
2866                                            u64 data)
2867 {
2868         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2869
2870         return dd->send_pio_err_status_cnt[29];
2871 }
2872
2873 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2874                                 const struct cntr_entry *entry,
2875                                 void *context, int vl, int mode, u64 data)
2876 {
2877         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2878
2879         return dd->send_pio_err_status_cnt[28];
2880 }
2881
2882 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2883                                              void *context, int vl, int mode,
2884                                              u64 data)
2885 {
2886         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2887
2888         return dd->send_pio_err_status_cnt[27];
2889 }
2890
2891 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2892                                              void *context, int vl, int mode,
2893                                              u64 data)
2894 {
2895         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2896
2897         return dd->send_pio_err_status_cnt[26];
2898 }
2899
2900 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2901                                                 void *context, int vl,
2902                                                 int mode, u64 data)
2903 {
2904         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2905
2906         return dd->send_pio_err_status_cnt[25];
2907 }
2908
2909 static u64 access_pio_block_qw_count_parity_err_cnt(
2910                                 const struct cntr_entry *entry,
2911                                 void *context, int vl, int mode, u64 data)
2912 {
2913         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2914
2915         return dd->send_pio_err_status_cnt[24];
2916 }
2917
2918 static u64 access_pio_write_qw_valid_parity_err_cnt(
2919                                 const struct cntr_entry *entry,
2920                                 void *context, int vl, int mode, u64 data)
2921 {
2922         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2923
2924         return dd->send_pio_err_status_cnt[23];
2925 }
2926
2927 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2928                                             void *context, int vl, int mode,
2929                                             u64 data)
2930 {
2931         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2932
2933         return dd->send_pio_err_status_cnt[22];
2934 }
2935
2936 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2937                                                 void *context, int vl,
2938                                                 int mode, u64 data)
2939 {
2940         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2941
2942         return dd->send_pio_err_status_cnt[21];
2943 }
2944
2945 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2946                                                 void *context, int vl,
2947                                                 int mode, u64 data)
2948 {
2949         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2950
2951         return dd->send_pio_err_status_cnt[20];
2952 }
2953
2954 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2955                                                 void *context, int vl,
2956                                                 int mode, u64 data)
2957 {
2958         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2959
2960         return dd->send_pio_err_status_cnt[19];
2961 }
2962
2963 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2964                                 const struct cntr_entry *entry,
2965                                 void *context, int vl, int mode, u64 data)
2966 {
2967         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2968
2969         return dd->send_pio_err_status_cnt[18];
2970 }
2971
2972 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2973                                          void *context, int vl, int mode,
2974                                          u64 data)
2975 {
2976         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2977
2978         return dd->send_pio_err_status_cnt[17];
2979 }
2980
2981 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2982                                             void *context, int vl, int mode,
2983                                             u64 data)
2984 {
2985         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2986
2987         return dd->send_pio_err_status_cnt[16];
2988 }
2989
2990 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2991                                 const struct cntr_entry *entry,
2992                                 void *context, int vl, int mode, u64 data)
2993 {
2994         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2995
2996         return dd->send_pio_err_status_cnt[15];
2997 }
2998
2999 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
3000                                 const struct cntr_entry *entry,
3001                                 void *context, int vl, int mode, u64 data)
3002 {
3003         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3004
3005         return dd->send_pio_err_status_cnt[14];
3006 }
3007
3008 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
3009                                 const struct cntr_entry *entry,
3010                                 void *context, int vl, int mode, u64 data)
3011 {
3012         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3013
3014         return dd->send_pio_err_status_cnt[13];
3015 }
3016
3017 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3018                                 const struct cntr_entry *entry,
3019                                 void *context, int vl, int mode, u64 data)
3020 {
3021         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3022
3023         return dd->send_pio_err_status_cnt[12];
3024 }
3025
3026 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3027                                 const struct cntr_entry *entry,
3028                                 void *context, int vl, int mode, u64 data)
3029 {
3030         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3031
3032         return dd->send_pio_err_status_cnt[11];
3033 }
3034
3035 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3036                                 const struct cntr_entry *entry,
3037                                 void *context, int vl, int mode, u64 data)
3038 {
3039         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3040
3041         return dd->send_pio_err_status_cnt[10];
3042 }
3043
3044 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3045                                 const struct cntr_entry *entry,
3046                                 void *context, int vl, int mode, u64 data)
3047 {
3048         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3049
3050         return dd->send_pio_err_status_cnt[9];
3051 }
3052
3053 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3054                                 const struct cntr_entry *entry,
3055                                 void *context, int vl, int mode, u64 data)
3056 {
3057         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3058
3059         return dd->send_pio_err_status_cnt[8];
3060 }
3061
3062 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3063                                 const struct cntr_entry *entry,
3064                                 void *context, int vl, int mode, u64 data)
3065 {
3066         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3067
3068         return dd->send_pio_err_status_cnt[7];
3069 }
3070
3071 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3072                                               void *context, int vl, int mode,
3073                                               u64 data)
3074 {
3075         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3076
3077         return dd->send_pio_err_status_cnt[6];
3078 }
3079
3080 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3081                                               void *context, int vl, int mode,
3082                                               u64 data)
3083 {
3084         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3085
3086         return dd->send_pio_err_status_cnt[5];
3087 }
3088
3089 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3090                                            void *context, int vl, int mode,
3091                                            u64 data)
3092 {
3093         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3094
3095         return dd->send_pio_err_status_cnt[4];
3096 }
3097
3098 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3099                                            void *context, int vl, int mode,
3100                                            u64 data)
3101 {
3102         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3103
3104         return dd->send_pio_err_status_cnt[3];
3105 }
3106
3107 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3108                                          void *context, int vl, int mode,
3109                                          u64 data)
3110 {
3111         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3112
3113         return dd->send_pio_err_status_cnt[2];
3114 }
3115
3116 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3117                                                 void *context, int vl,
3118                                                 int mode, u64 data)
3119 {
3120         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3121
3122         return dd->send_pio_err_status_cnt[1];
3123 }
3124
3125 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3126                                              void *context, int vl, int mode,
3127                                              u64 data)
3128 {
3129         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3130
3131         return dd->send_pio_err_status_cnt[0];
3132 }
3133
3134 /*
3135  * Software counters corresponding to each of the
3136  * error status bits within SendDmaErrStatus
3137  */
3138 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3139                                 const struct cntr_entry *entry,
3140                                 void *context, int vl, int mode, u64 data)
3141 {
3142         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3143
3144         return dd->send_dma_err_status_cnt[3];
3145 }
3146
3147 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3148                                 const struct cntr_entry *entry,
3149                                 void *context, int vl, int mode, u64 data)
3150 {
3151         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3152
3153         return dd->send_dma_err_status_cnt[2];
3154 }
3155
3156 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3157                                           void *context, int vl, int mode,
3158                                           u64 data)
3159 {
3160         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3161
3162         return dd->send_dma_err_status_cnt[1];
3163 }
3164
3165 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3166                                        void *context, int vl, int mode,
3167                                        u64 data)
3168 {
3169         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3170
3171         return dd->send_dma_err_status_cnt[0];
3172 }
3173
3174 /*
3175  * Software counters corresponding to each of the
3176  * error status bits within SendEgressErrStatus
3177  */
3178 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3179                                 const struct cntr_entry *entry,
3180                                 void *context, int vl, int mode, u64 data)
3181 {
3182         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3183
3184         return dd->send_egress_err_status_cnt[63];
3185 }
3186
3187 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3188                                 const struct cntr_entry *entry,
3189                                 void *context, int vl, int mode, u64 data)
3190 {
3191         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3192
3193         return dd->send_egress_err_status_cnt[62];
3194 }
3195
3196 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3197                                              void *context, int vl, int mode,
3198                                              u64 data)
3199 {
3200         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3201
3202         return dd->send_egress_err_status_cnt[61];
3203 }
3204
3205 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3206                                                  void *context, int vl,
3207                                                  int mode, u64 data)
3208 {
3209         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3210
3211         return dd->send_egress_err_status_cnt[60];
3212 }
3213
3214 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3215                                 const struct cntr_entry *entry,
3216                                 void *context, int vl, int mode, u64 data)
3217 {
3218         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3219
3220         return dd->send_egress_err_status_cnt[59];
3221 }
3222
3223 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3224                                         void *context, int vl, int mode,
3225                                         u64 data)
3226 {
3227         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3228
3229         return dd->send_egress_err_status_cnt[58];
3230 }
3231
3232 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3233                                             void *context, int vl, int mode,
3234                                             u64 data)
3235 {
3236         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3237
3238         return dd->send_egress_err_status_cnt[57];
3239 }
3240
3241 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3242                                               void *context, int vl, int mode,
3243                                               u64 data)
3244 {
3245         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3246
3247         return dd->send_egress_err_status_cnt[56];
3248 }
3249
3250 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3251                                               void *context, int vl, int mode,
3252                                               u64 data)
3253 {
3254         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3255
3256         return dd->send_egress_err_status_cnt[55];
3257 }
3258
3259 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3260                                               void *context, int vl, int mode,
3261                                               u64 data)
3262 {
3263         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3264
3265         return dd->send_egress_err_status_cnt[54];
3266 }
3267
3268 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3269                                               void *context, int vl, int mode,
3270                                               u64 data)
3271 {
3272         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3273
3274         return dd->send_egress_err_status_cnt[53];
3275 }
3276
3277 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3278                                               void *context, int vl, int mode,
3279                                               u64 data)
3280 {
3281         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3282
3283         return dd->send_egress_err_status_cnt[52];
3284 }
3285
3286 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3287                                               void *context, int vl, int mode,
3288                                               u64 data)
3289 {
3290         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3291
3292         return dd->send_egress_err_status_cnt[51];
3293 }
3294
3295 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3296                                               void *context, int vl, int mode,
3297                                               u64 data)
3298 {
3299         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3300
3301         return dd->send_egress_err_status_cnt[50];
3302 }
3303
3304 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3305                                               void *context, int vl, int mode,
3306                                               u64 data)
3307 {
3308         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3309
3310         return dd->send_egress_err_status_cnt[49];
3311 }
3312
3313 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3314                                               void *context, int vl, int mode,
3315                                               u64 data)
3316 {
3317         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3318
3319         return dd->send_egress_err_status_cnt[48];
3320 }
3321
3322 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3323                                               void *context, int vl, int mode,
3324                                               u64 data)
3325 {
3326         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3327
3328         return dd->send_egress_err_status_cnt[47];
3329 }
3330
3331 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3332                                             void *context, int vl, int mode,
3333                                             u64 data)
3334 {
3335         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3336
3337         return dd->send_egress_err_status_cnt[46];
3338 }
3339
3340 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3341                                              void *context, int vl, int mode,
3342                                              u64 data)
3343 {
3344         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3345
3346         return dd->send_egress_err_status_cnt[45];
3347 }
3348
3349 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3350                                                  void *context, int vl,
3351                                                  int mode, u64 data)
3352 {
3353         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3354
3355         return dd->send_egress_err_status_cnt[44];
3356 }
3357
3358 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3359                                 const struct cntr_entry *entry,
3360                                 void *context, int vl, int mode, u64 data)
3361 {
3362         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3363
3364         return dd->send_egress_err_status_cnt[43];
3365 }
3366
3367 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3368                                         void *context, int vl, int mode,
3369                                         u64 data)
3370 {
3371         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3372
3373         return dd->send_egress_err_status_cnt[42];
3374 }
3375
3376 static u64 access_tx_credit_return_partiy_err_cnt(
3377                                 const struct cntr_entry *entry,
3378                                 void *context, int vl, int mode, u64 data)
3379 {
3380         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3381
3382         return dd->send_egress_err_status_cnt[41];
3383 }
3384
3385 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3386                                 const struct cntr_entry *entry,
3387                                 void *context, int vl, int mode, u64 data)
3388 {
3389         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3390
3391         return dd->send_egress_err_status_cnt[40];
3392 }
3393
3394 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3395                                 const struct cntr_entry *entry,
3396                                 void *context, int vl, int mode, u64 data)
3397 {
3398         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3399
3400         return dd->send_egress_err_status_cnt[39];
3401 }
3402
3403 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3404                                 const struct cntr_entry *entry,
3405                                 void *context, int vl, int mode, u64 data)
3406 {
3407         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3408
3409         return dd->send_egress_err_status_cnt[38];
3410 }
3411
3412 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3413                                 const struct cntr_entry *entry,
3414                                 void *context, int vl, int mode, u64 data)
3415 {
3416         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3417
3418         return dd->send_egress_err_status_cnt[37];
3419 }
3420
3421 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3422                                 const struct cntr_entry *entry,
3423                                 void *context, int vl, int mode, u64 data)
3424 {
3425         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3426
3427         return dd->send_egress_err_status_cnt[36];
3428 }
3429
3430 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3431                                 const struct cntr_entry *entry,
3432                                 void *context, int vl, int mode, u64 data)
3433 {
3434         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3435
3436         return dd->send_egress_err_status_cnt[35];
3437 }
3438
3439 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3440                                 const struct cntr_entry *entry,
3441                                 void *context, int vl, int mode, u64 data)
3442 {
3443         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3444
3445         return dd->send_egress_err_status_cnt[34];
3446 }
3447
3448 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3449                                 const struct cntr_entry *entry,
3450                                 void *context, int vl, int mode, u64 data)
3451 {
3452         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3453
3454         return dd->send_egress_err_status_cnt[33];
3455 }
3456
3457 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3458                                 const struct cntr_entry *entry,
3459                                 void *context, int vl, int mode, u64 data)
3460 {
3461         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3462
3463         return dd->send_egress_err_status_cnt[32];
3464 }
3465
3466 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3467                                 const struct cntr_entry *entry,
3468                                 void *context, int vl, int mode, u64 data)
3469 {
3470         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3471
3472         return dd->send_egress_err_status_cnt[31];
3473 }
3474
3475 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3476                                 const struct cntr_entry *entry,
3477                                 void *context, int vl, int mode, u64 data)
3478 {
3479         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3480
3481         return dd->send_egress_err_status_cnt[30];
3482 }
3483
3484 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3485                                 const struct cntr_entry *entry,
3486                                 void *context, int vl, int mode, u64 data)
3487 {
3488         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3489
3490         return dd->send_egress_err_status_cnt[29];
3491 }
3492
3493 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3494                                 const struct cntr_entry *entry,
3495                                 void *context, int vl, int mode, u64 data)
3496 {
3497         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3498
3499         return dd->send_egress_err_status_cnt[28];
3500 }
3501
3502 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3503                                 const struct cntr_entry *entry,
3504                                 void *context, int vl, int mode, u64 data)
3505 {
3506         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3507
3508         return dd->send_egress_err_status_cnt[27];
3509 }
3510
3511 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3512                                 const struct cntr_entry *entry,
3513                                 void *context, int vl, int mode, u64 data)
3514 {
3515         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3516
3517         return dd->send_egress_err_status_cnt[26];
3518 }
3519
3520 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3521                                 const struct cntr_entry *entry,
3522                                 void *context, int vl, int mode, u64 data)
3523 {
3524         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3525
3526         return dd->send_egress_err_status_cnt[25];
3527 }
3528
3529 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3530                                 const struct cntr_entry *entry,
3531                                 void *context, int vl, int mode, u64 data)
3532 {
3533         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3534
3535         return dd->send_egress_err_status_cnt[24];
3536 }
3537
3538 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3539                                 const struct cntr_entry *entry,
3540                                 void *context, int vl, int mode, u64 data)
3541 {
3542         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3543
3544         return dd->send_egress_err_status_cnt[23];
3545 }
3546
3547 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3548                                 const struct cntr_entry *entry,
3549                                 void *context, int vl, int mode, u64 data)
3550 {
3551         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3552
3553         return dd->send_egress_err_status_cnt[22];
3554 }
3555
3556 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3557                                 const struct cntr_entry *entry,
3558                                 void *context, int vl, int mode, u64 data)
3559 {
3560         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3561
3562         return dd->send_egress_err_status_cnt[21];
3563 }
3564
3565 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3566                                 const struct cntr_entry *entry,
3567                                 void *context, int vl, int mode, u64 data)
3568 {
3569         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3570
3571         return dd->send_egress_err_status_cnt[20];
3572 }
3573
3574 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3575                                 const struct cntr_entry *entry,
3576                                 void *context, int vl, int mode, u64 data)
3577 {
3578         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3579
3580         return dd->send_egress_err_status_cnt[19];
3581 }
3582
3583 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3584                                 const struct cntr_entry *entry,
3585                                 void *context, int vl, int mode, u64 data)
3586 {
3587         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3588
3589         return dd->send_egress_err_status_cnt[18];
3590 }
3591
3592 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3593                                 const struct cntr_entry *entry,
3594                                 void *context, int vl, int mode, u64 data)
3595 {
3596         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3597
3598         return dd->send_egress_err_status_cnt[17];
3599 }
3600
3601 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3602                                 const struct cntr_entry *entry,
3603                                 void *context, int vl, int mode, u64 data)
3604 {
3605         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3606
3607         return dd->send_egress_err_status_cnt[16];
3608 }
3609
3610 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3611                                            void *context, int vl, int mode,
3612                                            u64 data)
3613 {
3614         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3615
3616         return dd->send_egress_err_status_cnt[15];
3617 }
3618
3619 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3620                                                  void *context, int vl,
3621                                                  int mode, u64 data)
3622 {
3623         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3624
3625         return dd->send_egress_err_status_cnt[14];
3626 }
3627
3628 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3629                                                void *context, int vl, int mode,
3630                                                u64 data)
3631 {
3632         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3633
3634         return dd->send_egress_err_status_cnt[13];
3635 }
3636
3637 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3638                                         void *context, int vl, int mode,
3639                                         u64 data)
3640 {
3641         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3642
3643         return dd->send_egress_err_status_cnt[12];
3644 }
3645
3646 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3647                                 const struct cntr_entry *entry,
3648                                 void *context, int vl, int mode, u64 data)
3649 {
3650         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3651
3652         return dd->send_egress_err_status_cnt[11];
3653 }
3654
3655 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3656                                              void *context, int vl, int mode,
3657                                              u64 data)
3658 {
3659         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3660
3661         return dd->send_egress_err_status_cnt[10];
3662 }
3663
3664 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3665                                             void *context, int vl, int mode,
3666                                             u64 data)
3667 {
3668         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3669
3670         return dd->send_egress_err_status_cnt[9];
3671 }
3672
3673 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3674                                 const struct cntr_entry *entry,
3675                                 void *context, int vl, int mode, u64 data)
3676 {
3677         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3678
3679         return dd->send_egress_err_status_cnt[8];
3680 }
3681
3682 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3683                                 const struct cntr_entry *entry,
3684                                 void *context, int vl, int mode, u64 data)
3685 {
3686         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3687
3688         return dd->send_egress_err_status_cnt[7];
3689 }
3690
3691 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3692                                             void *context, int vl, int mode,
3693                                             u64 data)
3694 {
3695         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3696
3697         return dd->send_egress_err_status_cnt[6];
3698 }
3699
3700 static u64 access_tx_incorrect_link_state_err_cnt(
3701                                 const struct cntr_entry *entry,
3702                                 void *context, int vl, int mode, u64 data)
3703 {
3704         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3705
3706         return dd->send_egress_err_status_cnt[5];
3707 }
3708
3709 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3710                                       void *context, int vl, int mode,
3711                                       u64 data)
3712 {
3713         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3714
3715         return dd->send_egress_err_status_cnt[4];
3716 }
3717
3718 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3719                                 const struct cntr_entry *entry,
3720                                 void *context, int vl, int mode, u64 data)
3721 {
3722         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3723
3724         return dd->send_egress_err_status_cnt[3];
3725 }
3726
3727 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3728                                             void *context, int vl, int mode,
3729                                             u64 data)
3730 {
3731         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3732
3733         return dd->send_egress_err_status_cnt[2];
3734 }
3735
3736 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3737                                 const struct cntr_entry *entry,
3738                                 void *context, int vl, int mode, u64 data)
3739 {
3740         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3741
3742         return dd->send_egress_err_status_cnt[1];
3743 }
3744
3745 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3746                                 const struct cntr_entry *entry,
3747                                 void *context, int vl, int mode, u64 data)
3748 {
3749         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3750
3751         return dd->send_egress_err_status_cnt[0];
3752 }
3753
3754 /*
3755  * Software counters corresponding to each of the
3756  * error status bits within SendErrStatus
3757  */
3758 static u64 access_send_csr_write_bad_addr_err_cnt(
3759                                 const struct cntr_entry *entry,
3760                                 void *context, int vl, int mode, u64 data)
3761 {
3762         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3763
3764         return dd->send_err_status_cnt[2];
3765 }
3766
3767 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3768                                                  void *context, int vl,
3769                                                  int mode, u64 data)
3770 {
3771         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3772
3773         return dd->send_err_status_cnt[1];
3774 }
3775
3776 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3777                                       void *context, int vl, int mode,
3778                                       u64 data)
3779 {
3780         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3781
3782         return dd->send_err_status_cnt[0];
3783 }
3784
3785 /*
3786  * Software counters corresponding to each of the
3787  * error status bits within SendCtxtErrStatus
3788  */
3789 static u64 access_pio_write_out_of_bounds_err_cnt(
3790                                 const struct cntr_entry *entry,
3791                                 void *context, int vl, int mode, u64 data)
3792 {
3793         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3794
3795         return dd->sw_ctxt_err_status_cnt[4];
3796 }
3797
3798 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3799                                              void *context, int vl, int mode,
3800                                              u64 data)
3801 {
3802         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3803
3804         return dd->sw_ctxt_err_status_cnt[3];
3805 }
3806
3807 static u64 access_pio_write_crosses_boundary_err_cnt(
3808                                 const struct cntr_entry *entry,
3809                                 void *context, int vl, int mode, u64 data)
3810 {
3811         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3812
3813         return dd->sw_ctxt_err_status_cnt[2];
3814 }
3815
3816 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3817                                                 void *context, int vl,
3818                                                 int mode, u64 data)
3819 {
3820         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3821
3822         return dd->sw_ctxt_err_status_cnt[1];
3823 }
3824
3825 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3826                                                void *context, int vl, int mode,
3827                                                u64 data)
3828 {
3829         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3830
3831         return dd->sw_ctxt_err_status_cnt[0];
3832 }
3833
3834 /*
3835  * Software counters corresponding to each of the
3836  * error status bits within SendDmaEngErrStatus
3837  */
3838 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3839                                 const struct cntr_entry *entry,
3840                                 void *context, int vl, int mode, u64 data)
3841 {
3842         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3843
3844         return dd->sw_send_dma_eng_err_status_cnt[23];
3845 }
3846
3847 static u64 access_sdma_header_storage_cor_err_cnt(
3848                                 const struct cntr_entry *entry,
3849                                 void *context, int vl, int mode, u64 data)
3850 {
3851         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3852
3853         return dd->sw_send_dma_eng_err_status_cnt[22];
3854 }
3855
3856 static u64 access_sdma_packet_tracking_cor_err_cnt(
3857                                 const struct cntr_entry *entry,
3858                                 void *context, int vl, int mode, u64 data)
3859 {
3860         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3861
3862         return dd->sw_send_dma_eng_err_status_cnt[21];
3863 }
3864
3865 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3866                                             void *context, int vl, int mode,
3867                                             u64 data)
3868 {
3869         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3870
3871         return dd->sw_send_dma_eng_err_status_cnt[20];
3872 }
3873
3874 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3875                                               void *context, int vl, int mode,
3876                                               u64 data)
3877 {
3878         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3879
3880         return dd->sw_send_dma_eng_err_status_cnt[19];
3881 }
3882
3883 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3884                                 const struct cntr_entry *entry,
3885                                 void *context, int vl, int mode, u64 data)
3886 {
3887         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3888
3889         return dd->sw_send_dma_eng_err_status_cnt[18];
3890 }
3891
3892 static u64 access_sdma_header_storage_unc_err_cnt(
3893                                 const struct cntr_entry *entry,
3894                                 void *context, int vl, int mode, u64 data)
3895 {
3896         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3897
3898         return dd->sw_send_dma_eng_err_status_cnt[17];
3899 }
3900
3901 static u64 access_sdma_packet_tracking_unc_err_cnt(
3902                                 const struct cntr_entry *entry,
3903                                 void *context, int vl, int mode, u64 data)
3904 {
3905         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3906
3907         return dd->sw_send_dma_eng_err_status_cnt[16];
3908 }
3909
3910 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3911                                             void *context, int vl, int mode,
3912                                             u64 data)
3913 {
3914         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3915
3916         return dd->sw_send_dma_eng_err_status_cnt[15];
3917 }
3918
3919 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3920                                               void *context, int vl, int mode,
3921                                               u64 data)
3922 {
3923         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3924
3925         return dd->sw_send_dma_eng_err_status_cnt[14];
3926 }
3927
3928 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3929                                        void *context, int vl, int mode,
3930                                        u64 data)
3931 {
3932         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3933
3934         return dd->sw_send_dma_eng_err_status_cnt[13];
3935 }
3936
3937 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3938                                              void *context, int vl, int mode,
3939                                              u64 data)
3940 {
3941         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3942
3943         return dd->sw_send_dma_eng_err_status_cnt[12];
3944 }
3945
3946 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3947                                               void *context, int vl, int mode,
3948                                               u64 data)
3949 {
3950         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3951
3952         return dd->sw_send_dma_eng_err_status_cnt[11];
3953 }
3954
3955 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3956                                              void *context, int vl, int mode,
3957                                              u64 data)
3958 {
3959         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3960
3961         return dd->sw_send_dma_eng_err_status_cnt[10];
3962 }
3963
3964 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3965                                           void *context, int vl, int mode,
3966                                           u64 data)
3967 {
3968         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3969
3970         return dd->sw_send_dma_eng_err_status_cnt[9];
3971 }
3972
3973 static u64 access_sdma_packet_desc_overflow_err_cnt(
3974                                 const struct cntr_entry *entry,
3975                                 void *context, int vl, int mode, u64 data)
3976 {
3977         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3978
3979         return dd->sw_send_dma_eng_err_status_cnt[8];
3980 }
3981
3982 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3983                                                void *context, int vl,
3984                                                int mode, u64 data)
3985 {
3986         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3987
3988         return dd->sw_send_dma_eng_err_status_cnt[7];
3989 }
3990
3991 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3992                                     void *context, int vl, int mode, u64 data)
3993 {
3994         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3995
3996         return dd->sw_send_dma_eng_err_status_cnt[6];
3997 }
3998
3999 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
4000                                         void *context, int vl, int mode,
4001                                         u64 data)
4002 {
4003         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4004
4005         return dd->sw_send_dma_eng_err_status_cnt[5];
4006 }
4007
4008 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
4009                                           void *context, int vl, int mode,
4010                                           u64 data)
4011 {
4012         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4013
4014         return dd->sw_send_dma_eng_err_status_cnt[4];
4015 }
4016
4017 static u64 access_sdma_tail_out_of_bounds_err_cnt(
4018                                 const struct cntr_entry *entry,
4019                                 void *context, int vl, int mode, u64 data)
4020 {
4021         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4022
4023         return dd->sw_send_dma_eng_err_status_cnt[3];
4024 }
4025
4026 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4027                                         void *context, int vl, int mode,
4028                                         u64 data)
4029 {
4030         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4031
4032         return dd->sw_send_dma_eng_err_status_cnt[2];
4033 }
4034
4035 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4036                                             void *context, int vl, int mode,
4037                                             u64 data)
4038 {
4039         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4040
4041         return dd->sw_send_dma_eng_err_status_cnt[1];
4042 }
4043
4044 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4045                                         void *context, int vl, int mode,
4046                                         u64 data)
4047 {
4048         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4049
4050         return dd->sw_send_dma_eng_err_status_cnt[0];
4051 }
4052
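/*
 * The DC receive error counter mixes hardware and software state: unlike
 * the SDMA engine accessors above, which only return software shadow
 * counts, this handler accesses the DCC_ERR_PORTRCV_ERR_CNT CSR through
 * read_write_csr() and, on a read, adds the software count of bypass
 * packet errors (saturating at CNTR_MAX).  A write clears the software
 * portion.
 */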
4053 static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4054                                  void *context, int vl, int mode,
4055                                  u64 data)
4056 {
4057         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4058
4059         u64 val = 0;
4060         u64 csr = entry->csr;
4061
4062         val = read_write_csr(dd, csr, mode, data);
4063         if (mode == CNTR_MODE_R) {
4064                 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4065                         CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4066         } else if (mode == CNTR_MODE_W) {
4067                 dd->sw_rcv_bypass_packet_errors = 0;
4068         } else {
4069                 dd_dev_err(dd, "Invalid cntr register access mode\n");
4070                 return 0;
4071         }
4072         return val;
4073 }
4074
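/*
 * Generator for the per-CPU software counter accessors kept in the
 * port's verbs data (ibport_data.rvp).  For instance,
 * def_access_sw_cpu(rc_acks) produces access_sw_cpu_rc_acks(), which
 * hands the z_rc_acks baseline and the rc_acks per-CPU counter to
 * read_write_cpu().
 */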
4075 #define def_access_sw_cpu(cntr) \
4076 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
4077                               void *context, int vl, int mode, u64 data)      \
4078 {                                                                             \
4079         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4080         return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
4081                               ppd->ibport_data.rvp.cntr, vl,                  \
4082                               mode, data);                                    \
4083 }
4084
4085 def_access_sw_cpu(rc_acks);
4086 def_access_sw_cpu(rc_qacks);
4087 def_access_sw_cpu(rc_delayed_comp);
4088
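/*
 * Generator for the plain software IB port counter accessors.  These
 * counters have no per-VL variant, so a request for a specific VL
 * returns 0; otherwise the rvp.n_<cntr> field is read or written via
 * read_write_sw().
 */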
4089 #define def_access_ibp_counter(cntr) \
4090 static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
4091                                 void *context, int vl, int mode, u64 data)    \
4092 {                                                                             \
4093         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4094                                                                               \
4095         if (vl != CNTR_INVALID_VL)                                            \
4096                 return 0;                                                     \
4097                                                                               \
4098         return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
4099                              mode, data);                                     \
4100 }
4101
4102 def_access_ibp_counter(loop_pkts);
4103 def_access_ibp_counter(rc_resends);
4104 def_access_ibp_counter(rnr_naks);
4105 def_access_ibp_counter(other_naks);
4106 def_access_ibp_counter(rc_timeouts);
4107 def_access_ibp_counter(pkt_drops);
4108 def_access_ibp_counter(dmawait);
4109 def_access_ibp_counter(rc_seqnak);
4110 def_access_ibp_counter(rc_dupreq);
4111 def_access_ibp_counter(rdma_seq);
4112 def_access_ibp_counter(unaligned);
4113 def_access_ibp_counter(seq_naks);
4114
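/*
 * Device counter table, indexed by the C_* device counter enum.  Each
 * CNTR_ELEM() (or wrapper macro) supplies a display name, a backing CSR
 * and offset (0 for purely software counters), the counter flags
 * (CNTR_NORMAL, CNTR_SYNTH, CNTR_VL, CNTR_32BIT, CNTR_SDMA) and the
 * access callback that services reads (CNTR_MODE_R) and writes
 * (CNTR_MODE_W), deciding whether the request means a CSR access or a
 * software counter lookup.
 */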
4115 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4116 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4117 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4118                         CNTR_NORMAL),
4119 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4120                         CNTR_NORMAL),
4121 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4122                         RCV_TID_FLOW_GEN_MISMATCH_CNT,
4123                         CNTR_NORMAL),
4124 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4125                         CNTR_NORMAL),
4126 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4127                         RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4128 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4129                         CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4130 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4131                         CNTR_NORMAL),
4132 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4133                         CNTR_NORMAL),
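/* CCE interrupt source counters (CCE_*_INT_CNT CSRs). */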
4134 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4135                         CNTR_NORMAL),
4136 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4137                         CNTR_NORMAL),
4138 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4139                         CNTR_NORMAL),
4140 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4141                         CNTR_NORMAL),
4142 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4143                         CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4144 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4145                         CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
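/*
 * DC (link layer) counters.  DC_PERF_CNTR() entries are backed by DCC
 * error and performance CSRs; the DC_PERF_CNTR_LCB() variants are backed
 * by counters that live in the LCB CSR space.
 */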
4146 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4147                               CNTR_SYNTH),
4148 [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4149                             access_dc_rcv_err_cnt),
4150 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4151                                  CNTR_SYNTH),
4152 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4153                                   CNTR_SYNTH),
4154 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4155                                   CNTR_SYNTH),
4156 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4157                                    DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4158 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4159                                   DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4160                                   CNTR_SYNTH),
4161 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4162                                 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4163 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4164                                CNTR_SYNTH),
4165 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4166                               CNTR_SYNTH),
4167 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4168                                CNTR_SYNTH),
4169 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4170                                  CNTR_SYNTH),
4171 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4172                                 CNTR_SYNTH),
4173 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4174                                 CNTR_SYNTH),
4175 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4176                                CNTR_SYNTH),
4177 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4178                                  CNTR_SYNTH | CNTR_VL),
4179 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4180                                 CNTR_SYNTH | CNTR_VL),
4181 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4182 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4183                                  CNTR_SYNTH | CNTR_VL),
4184 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4185 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4186                                  CNTR_SYNTH | CNTR_VL),
4187 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4188                               CNTR_SYNTH),
4189 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4190                                  CNTR_SYNTH | CNTR_VL),
4191 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4192                                 CNTR_SYNTH),
4193 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4194                                    CNTR_SYNTH | CNTR_VL),
4195 [C_DC_TOTAL_CRC] =
4196         DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4197                          CNTR_SYNTH),
4198 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4199                                   CNTR_SYNTH),
4200 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4201                                   CNTR_SYNTH),
4202 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4203                                   CNTR_SYNTH),
4204 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4205                                   CNTR_SYNTH),
4206 [C_DC_CRC_MULT_LN] =
4207         DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4208                          CNTR_SYNTH),
4209 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4210                                     CNTR_SYNTH),
4211 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4212                                     CNTR_SYNTH),
4213 [C_DC_SEQ_CRC_CNT] =
4214         DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4215                          CNTR_SYNTH),
4216 [C_DC_ESC0_ONLY_CNT] =
4217         DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4218                          CNTR_SYNTH),
4219 [C_DC_ESC0_PLUS1_CNT] =
4220         DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4221                          CNTR_SYNTH),
4222 [C_DC_ESC0_PLUS2_CNT] =
4223         DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4224                          CNTR_SYNTH),
4225 [C_DC_REINIT_FROM_PEER_CNT] =
4226         DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4227                          CNTR_SYNTH),
4228 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4229                                   CNTR_SYNTH),
4230 [C_DC_MISC_FLG_CNT] =
4231         DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4232                          CNTR_SYNTH),
4233 [C_DC_PRF_GOOD_LTP_CNT] =
4234         DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4235 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4236         DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4237                          CNTR_SYNTH),
4238 [C_DC_PRF_RX_FLIT_CNT] =
4239         DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4240 [C_DC_PRF_TX_FLIT_CNT] =
4241         DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4242 [C_DC_PRF_CLK_CNTR] =
4243         DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4244 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4245         DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4246 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4247         DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4248                          CNTR_SYNTH),
4249 [C_DC_PG_STS_TX_SBE_CNT] =
4250         DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4251 [C_DC_PG_STS_TX_MBE_CNT] =
4252         DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4253                          CNTR_SYNTH),
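/*
 * Software-maintained device counters: no backing CSR, so each value
 * comes entirely from its access_sw_* handler.  The C_SDMA_* counters
 * that follow carry the CNTR_SDMA flag, marking them as per-SDMA-engine
 * counters.
 */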
4254 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4255                             access_sw_cpu_intr),
4256 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4257                             access_sw_cpu_rcv_limit),
4258 [C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
4259                             access_sw_ctx0_seq_drop),
4260 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4261                             access_sw_vtx_wait),
4262 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4263                             access_sw_pio_wait),
4264 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4265                             access_sw_pio_drain),
4266 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4267                             access_sw_kmem_wait),
4268 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4269                             access_sw_send_schedule),
4270 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4271                                       SEND_DMA_DESC_FETCHED_CNT, 0,
4272                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4273                                       dev_access_u32_csr),
4274 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4275                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4276                              access_sde_int_cnt),
4277 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4278                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4279                              access_sde_err_cnt),
4280 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4281                                   CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4282                                   access_sde_idle_int_cnt),
4283 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4284                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4285                                       access_sde_progress_int_cnt),
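/*
 * The following groups of device counters shadow individual bits of the
 * chip error status registers (MISC_ERR_STATUS, CceErrStatus,
 * RcvErrStatus, SendPioErrStatus, SendDmaErrStatus, SendEgressErrStatus,
 * SendErrStatus and SendCtxtErrStatus).  Each entry has no backing CSR;
 * its access_* handler returns a count accumulated in software,
 * following the same pattern as the SDMA engine error accessors above.
 */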
4286 /* MISC_ERR_STATUS */
4287 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4288                                 CNTR_NORMAL,
4289                                 access_misc_pll_lock_fail_err_cnt),
4290 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4291                                 CNTR_NORMAL,
4292                                 access_misc_mbist_fail_err_cnt),
4293 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4294                                 CNTR_NORMAL,
4295                                 access_misc_invalid_eep_cmd_err_cnt),
4296 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4297                                 CNTR_NORMAL,
4298                                 access_misc_efuse_done_parity_err_cnt),
4299 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4300                                 CNTR_NORMAL,
4301                                 access_misc_efuse_write_err_cnt),
4302 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4303                                 0, CNTR_NORMAL,
4304                                 access_misc_efuse_read_bad_addr_err_cnt),
4305 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4306                                 CNTR_NORMAL,
4307                                 access_misc_efuse_csr_parity_err_cnt),
4308 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4309                                 CNTR_NORMAL,
4310                                 access_misc_fw_auth_failed_err_cnt),
4311 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4312                                 CNTR_NORMAL,
4313                                 access_misc_key_mismatch_err_cnt),
4314 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4315                                 CNTR_NORMAL,
4316                                 access_misc_sbus_write_failed_err_cnt),
4317 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4318                                 CNTR_NORMAL,
4319                                 access_misc_csr_write_bad_addr_err_cnt),
4320 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4321                                 CNTR_NORMAL,
4322                                 access_misc_csr_read_bad_addr_err_cnt),
4323 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4324                                 CNTR_NORMAL,
4325                                 access_misc_csr_parity_err_cnt),
4326 /* CceErrStatus */
4327 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4328                                 CNTR_NORMAL,
4329                                 access_sw_cce_err_status_aggregated_cnt),
4330 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4331                                 CNTR_NORMAL,
4332                                 access_cce_msix_csr_parity_err_cnt),
4333 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4334                                 CNTR_NORMAL,
4335                                 access_cce_int_map_unc_err_cnt),
4336 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4337                                 CNTR_NORMAL,
4338                                 access_cce_int_map_cor_err_cnt),
4339 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4340                                 CNTR_NORMAL,
4341                                 access_cce_msix_table_unc_err_cnt),
4342 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4343                                 CNTR_NORMAL,
4344                                 access_cce_msix_table_cor_err_cnt),
4345 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4346                                 0, CNTR_NORMAL,
4347                                 access_cce_rxdma_conv_fifo_parity_err_cnt),
4348 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4349                                 0, CNTR_NORMAL,
4350                                 access_cce_rcpl_async_fifo_parity_err_cnt),
4351 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4352                                 CNTR_NORMAL,
4353                                 access_cce_seg_write_bad_addr_err_cnt),
4354 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4355                                 CNTR_NORMAL,
4356                                 access_cce_seg_read_bad_addr_err_cnt),
4357 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4358                                 CNTR_NORMAL,
4359                                 access_la_triggered_cnt),
4360 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4361                                 CNTR_NORMAL,
4362                                 access_cce_trgt_cpl_timeout_err_cnt),
4363 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4364                                 CNTR_NORMAL,
4365                                 access_pcic_receive_parity_err_cnt),
4366 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4367                                 CNTR_NORMAL,
4368                                 access_pcic_transmit_back_parity_err_cnt),
4369 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4370                                 0, CNTR_NORMAL,
4371                                 access_pcic_transmit_front_parity_err_cnt),
4372 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4373                                 CNTR_NORMAL,
4374                                 access_pcic_cpl_dat_q_unc_err_cnt),
4375 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4376                                 CNTR_NORMAL,
4377                                 access_pcic_cpl_hd_q_unc_err_cnt),
4378 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4379                                 CNTR_NORMAL,
4380                                 access_pcic_post_dat_q_unc_err_cnt),
4381 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4382                                 CNTR_NORMAL,
4383                                 access_pcic_post_hd_q_unc_err_cnt),
4384 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4385                                 CNTR_NORMAL,
4386                                 access_pcic_retry_sot_mem_unc_err_cnt),
4387 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4388                                 CNTR_NORMAL,
4389                                 access_pcic_retry_mem_unc_err),
4390 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4391                                 CNTR_NORMAL,
4392                                 access_pcic_n_post_dat_q_parity_err_cnt),
4393 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4394                                 CNTR_NORMAL,
4395                                 access_pcic_n_post_h_q_parity_err_cnt),
4396 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4397                                 CNTR_NORMAL,
4398                                 access_pcic_cpl_dat_q_cor_err_cnt),
4399 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4400                                 CNTR_NORMAL,
4401                                 access_pcic_cpl_hd_q_cor_err_cnt),
4402 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4403                                 CNTR_NORMAL,
4404                                 access_pcic_post_dat_q_cor_err_cnt),
4405 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4406                                 CNTR_NORMAL,
4407                                 access_pcic_post_hd_q_cor_err_cnt),
4408 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4409                                 CNTR_NORMAL,
4410                                 access_pcic_retry_sot_mem_cor_err_cnt),
4411 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4412                                 CNTR_NORMAL,
4413                                 access_pcic_retry_mem_cor_err_cnt),
4414 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4415                                 "CceCli1AsyncFifoDbgParityError", 0, 0,
4416                                 CNTR_NORMAL,
4417                                 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4418 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4419                                 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4420                                 CNTR_NORMAL,
4421                                 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4422                                 ),
4423 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4424                         "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4425                         CNTR_NORMAL,
4426                         access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4427 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4428                         "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4429                         CNTR_NORMAL,
4430                         access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4431 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4432                         0, CNTR_NORMAL,
4433                         access_cce_cli2_async_fifo_parity_err_cnt),
4434 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4435                         CNTR_NORMAL,
4436                         access_cce_csr_cfg_bus_parity_err_cnt),
4437 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4438                         0, CNTR_NORMAL,
4439                         access_cce_cli0_async_fifo_parity_err_cnt),
4440 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4441                         CNTR_NORMAL,
4442                         access_cce_rspd_data_parity_err_cnt),
4443 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4444                         CNTR_NORMAL,
4445                         access_cce_trgt_access_err_cnt),
4446 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4447                         0, CNTR_NORMAL,
4448                         access_cce_trgt_async_fifo_parity_err_cnt),
4449 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4450                         CNTR_NORMAL,
4451                         access_cce_csr_write_bad_addr_err_cnt),
4452 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4453                         CNTR_NORMAL,
4454                         access_cce_csr_read_bad_addr_err_cnt),
4455 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4456                         CNTR_NORMAL,
4457                         access_ccs_csr_parity_err_cnt),
4458
4459 /* RcvErrStatus */
4460 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4461                         CNTR_NORMAL,
4462                         access_rx_csr_parity_err_cnt),
4463 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4464                         CNTR_NORMAL,
4465                         access_rx_csr_write_bad_addr_err_cnt),
4466 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4467                         CNTR_NORMAL,
4468                         access_rx_csr_read_bad_addr_err_cnt),
4469 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4470                         CNTR_NORMAL,
4471                         access_rx_dma_csr_unc_err_cnt),
4472 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4473                         CNTR_NORMAL,
4474                         access_rx_dma_dq_fsm_encoding_err_cnt),
4475 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4476                         CNTR_NORMAL,
4477                         access_rx_dma_eq_fsm_encoding_err_cnt),
4478 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4479                         CNTR_NORMAL,
4480                         access_rx_dma_csr_parity_err_cnt),
4481 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4482                         CNTR_NORMAL,
4483                         access_rx_rbuf_data_cor_err_cnt),
4484 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4485                         CNTR_NORMAL,
4486                         access_rx_rbuf_data_unc_err_cnt),
4487 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4488                         CNTR_NORMAL,
4489                         access_rx_dma_data_fifo_rd_cor_err_cnt),
4490 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4491                         CNTR_NORMAL,
4492                         access_rx_dma_data_fifo_rd_unc_err_cnt),
4493 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4494                         CNTR_NORMAL,
4495                         access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4496 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4497                         CNTR_NORMAL,
4498                         access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4499 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4500                         CNTR_NORMAL,
4501                         access_rx_rbuf_desc_part2_cor_err_cnt),
4502 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4503                         CNTR_NORMAL,
4504                         access_rx_rbuf_desc_part2_unc_err_cnt),
4505 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4506                         CNTR_NORMAL,
4507                         access_rx_rbuf_desc_part1_cor_err_cnt),
4508 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4509                         CNTR_NORMAL,
4510                         access_rx_rbuf_desc_part1_unc_err_cnt),
4511 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4512                         CNTR_NORMAL,
4513                         access_rx_hq_intr_fsm_err_cnt),
4514 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4515                         CNTR_NORMAL,
4516                         access_rx_hq_intr_csr_parity_err_cnt),
4517 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4518                         CNTR_NORMAL,
4519                         access_rx_lookup_csr_parity_err_cnt),
4520 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4521                         CNTR_NORMAL,
4522                         access_rx_lookup_rcv_array_cor_err_cnt),
4523 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4524                         CNTR_NORMAL,
4525                         access_rx_lookup_rcv_array_unc_err_cnt),
4526 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4527                         0, CNTR_NORMAL,
4528                         access_rx_lookup_des_part2_parity_err_cnt),
4529 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4530                         0, CNTR_NORMAL,
4531                         access_rx_lookup_des_part1_unc_cor_err_cnt),
4532 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4533                         CNTR_NORMAL,
4534                         access_rx_lookup_des_part1_unc_err_cnt),
4535 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4536                         CNTR_NORMAL,
4537                         access_rx_rbuf_next_free_buf_cor_err_cnt),
4538 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4539                         CNTR_NORMAL,
4540                         access_rx_rbuf_next_free_buf_unc_err_cnt),
4541 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4542                         "RxRbufFlInitWrAddrParityErr", 0, 0,
4543                         CNTR_NORMAL,
4544                         access_rbuf_fl_init_wr_addr_parity_err_cnt),
4545 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4546                         0, CNTR_NORMAL,
4547                         access_rx_rbuf_fl_initdone_parity_err_cnt),
4548 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4549                         0, CNTR_NORMAL,
4550                         access_rx_rbuf_fl_write_addr_parity_err_cnt),
4551 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4552                         CNTR_NORMAL,
4553                         access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4554 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4555                         CNTR_NORMAL,
4556                         access_rx_rbuf_empty_err_cnt),
4557 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4558                         CNTR_NORMAL,
4559                         access_rx_rbuf_full_err_cnt),
4560 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4561                         CNTR_NORMAL,
4562                         access_rbuf_bad_lookup_err_cnt),
4563 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4564                         CNTR_NORMAL,
4565                         access_rbuf_ctx_id_parity_err_cnt),
4566 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4567                         CNTR_NORMAL,
4568                         access_rbuf_csr_qeopdw_parity_err_cnt),
4569 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4570                         "RxRbufCsrQNumOfPktParityErr", 0, 0,
4571                         CNTR_NORMAL,
4572                         access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4573 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4574                         "RxRbufCsrQTlPtrParityErr", 0, 0,
4575                         CNTR_NORMAL,
4576                         access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4577 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4578                         0, CNTR_NORMAL,
4579                         access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4580 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4581                         0, CNTR_NORMAL,
4582                         access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4583 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4584                         0, 0, CNTR_NORMAL,
4585                         access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4586 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4587                         0, CNTR_NORMAL,
4588                         access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4589 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4590                         "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4591                         CNTR_NORMAL,
4592                         access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4593 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4594                         0, CNTR_NORMAL,
4595                         access_rx_rbuf_block_list_read_cor_err_cnt),
4596 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4597                         0, CNTR_NORMAL,
4598                         access_rx_rbuf_block_list_read_unc_err_cnt),
4599 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4600                         CNTR_NORMAL,
4601                         access_rx_rbuf_lookup_des_cor_err_cnt),
4602 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4603                         CNTR_NORMAL,
4604                         access_rx_rbuf_lookup_des_unc_err_cnt),
4605 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4606                         "RxRbufLookupDesRegUncCorErr", 0, 0,
4607                         CNTR_NORMAL,
4608                         access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4609 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4610                         CNTR_NORMAL,
4611                         access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4612 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4613                         CNTR_NORMAL,
4614                         access_rx_rbuf_free_list_cor_err_cnt),
4615 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4616                         CNTR_NORMAL,
4617                         access_rx_rbuf_free_list_unc_err_cnt),
4618 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4619                         CNTR_NORMAL,
4620                         access_rx_rcv_fsm_encoding_err_cnt),
4621 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4622                         CNTR_NORMAL,
4623                         access_rx_dma_flag_cor_err_cnt),
4624 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4625                         CNTR_NORMAL,
4626                         access_rx_dma_flag_unc_err_cnt),
4627 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4628                         CNTR_NORMAL,
4629                         access_rx_dc_sop_eop_parity_err_cnt),
4630 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4631                         CNTR_NORMAL,
4632                         access_rx_rcv_csr_parity_err_cnt),
4633 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4634                         CNTR_NORMAL,
4635                         access_rx_rcv_qp_map_table_cor_err_cnt),
4636 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4637                         CNTR_NORMAL,
4638                         access_rx_rcv_qp_map_table_unc_err_cnt),
4639 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4640                         CNTR_NORMAL,
4641                         access_rx_rcv_data_cor_err_cnt),
4642 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4643                         CNTR_NORMAL,
4644                         access_rx_rcv_data_unc_err_cnt),
4645 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4646                         CNTR_NORMAL,
4647                         access_rx_rcv_hdr_cor_err_cnt),
4648 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4649                         CNTR_NORMAL,
4650                         access_rx_rcv_hdr_unc_err_cnt),
4651 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4652                         CNTR_NORMAL,
4653                         access_rx_dc_intf_parity_err_cnt),
4654 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4655                         CNTR_NORMAL,
4656                         access_rx_dma_csr_cor_err_cnt),
4657 /* SendPioErrStatus */
4658 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4659                         CNTR_NORMAL,
4660                         access_pio_pec_sop_head_parity_err_cnt),
4661 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4662                         CNTR_NORMAL,
4663                         access_pio_pcc_sop_head_parity_err_cnt),
4664 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4665                         0, 0, CNTR_NORMAL,
4666                         access_pio_last_returned_cnt_parity_err_cnt),
4667 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4668                         0, CNTR_NORMAL,
4669                         access_pio_current_free_cnt_parity_err_cnt),
4670 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4671                         CNTR_NORMAL,
4672                         access_pio_reserved_31_err_cnt),
4673 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4674                         CNTR_NORMAL,
4675                         access_pio_reserved_30_err_cnt),
4676 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4677                         CNTR_NORMAL,
4678                         access_pio_ppmc_sop_len_err_cnt),
4679 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4680                         CNTR_NORMAL,
4681                         access_pio_ppmc_bqc_mem_parity_err_cnt),
4682 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4683                         CNTR_NORMAL,
4684                         access_pio_vl_fifo_parity_err_cnt),
4685 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4686                         CNTR_NORMAL,
4687                         access_pio_vlf_sop_parity_err_cnt),
4688 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4689                         CNTR_NORMAL,
4690                         access_pio_vlf_v1_len_parity_err_cnt),
4691 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4692                         CNTR_NORMAL,
4693                         access_pio_block_qw_count_parity_err_cnt),
4694 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4695                         CNTR_NORMAL,
4696                         access_pio_write_qw_valid_parity_err_cnt),
4697 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4698                         CNTR_NORMAL,
4699                         access_pio_state_machine_err_cnt),
4700 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4701                         CNTR_NORMAL,
4702                         access_pio_write_data_parity_err_cnt),
4703 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4704                         CNTR_NORMAL,
4705                         access_pio_host_addr_mem_cor_err_cnt),
4706 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4707                         CNTR_NORMAL,
4708                         access_pio_host_addr_mem_unc_err_cnt),
4709 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4710                         CNTR_NORMAL,
4711                         access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4712 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4713                         CNTR_NORMAL,
4714                         access_pio_init_sm_in_err_cnt),
4715 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4716                         CNTR_NORMAL,
4717                         access_pio_ppmc_pbl_fifo_err_cnt),
4718 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4719                         0, CNTR_NORMAL,
4720                         access_pio_credit_ret_fifo_parity_err_cnt),
4721 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4722                         CNTR_NORMAL,
4723                         access_pio_v1_len_mem_bank1_cor_err_cnt),
4724 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4725                         CNTR_NORMAL,
4726                         access_pio_v1_len_mem_bank0_cor_err_cnt),
4727 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4728                         CNTR_NORMAL,
4729                         access_pio_v1_len_mem_bank1_unc_err_cnt),
4730 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4731                         CNTR_NORMAL,
4732                         access_pio_v1_len_mem_bank0_unc_err_cnt),
4733 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4734                         CNTR_NORMAL,
4735                         access_pio_sm_pkt_reset_parity_err_cnt),
4736 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4737                         CNTR_NORMAL,
4738                         access_pio_pkt_evict_fifo_parity_err_cnt),
4739 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4740                         "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4741                         CNTR_NORMAL,
4742                         access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4743 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4744                         CNTR_NORMAL,
4745                         access_pio_sbrdctl_crrel_parity_err_cnt),
4746 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4747                         CNTR_NORMAL,
4748                         access_pio_pec_fifo_parity_err_cnt),
4749 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4750                         CNTR_NORMAL,
4751                         access_pio_pcc_fifo_parity_err_cnt),
4752 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4753                         CNTR_NORMAL,
4754                         access_pio_sb_mem_fifo1_err_cnt),
4755 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4756                         CNTR_NORMAL,
4757                         access_pio_sb_mem_fifo0_err_cnt),
4758 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4759                         CNTR_NORMAL,
4760                         access_pio_csr_parity_err_cnt),
4761 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4762                         CNTR_NORMAL,
4763                         access_pio_write_addr_parity_err_cnt),
4764 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4765                         CNTR_NORMAL,
4766                         access_pio_write_bad_ctxt_err_cnt),
4767 /* SendDmaErrStatus */
4768 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4769                         0, CNTR_NORMAL,
4770                         access_sdma_pcie_req_tracking_cor_err_cnt),
4771 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4772                         0, CNTR_NORMAL,
4773                         access_sdma_pcie_req_tracking_unc_err_cnt),
4774 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4775                         CNTR_NORMAL,
4776                         access_sdma_csr_parity_err_cnt),
4777 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4778                         CNTR_NORMAL,
4779                         access_sdma_rpy_tag_err_cnt),
4780 /* SendEgressErrStatus */
4781 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4782                         CNTR_NORMAL,
4783                         access_tx_read_pio_memory_csr_unc_err_cnt),
4784 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4785                         0, CNTR_NORMAL,
4786                         access_tx_read_sdma_memory_csr_err_cnt),
4787 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4788                         CNTR_NORMAL,
4789                         access_tx_egress_fifo_cor_err_cnt),
4790 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4791                         CNTR_NORMAL,
4792                         access_tx_read_pio_memory_cor_err_cnt),
4793 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4794                         CNTR_NORMAL,
4795                         access_tx_read_sdma_memory_cor_err_cnt),
4796 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4797                         CNTR_NORMAL,
4798                         access_tx_sb_hdr_cor_err_cnt),
4799 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4800                         CNTR_NORMAL,
4801                         access_tx_credit_overrun_err_cnt),
4802 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4803                         CNTR_NORMAL,
4804                         access_tx_launch_fifo8_cor_err_cnt),
4805 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4806                         CNTR_NORMAL,
4807                         access_tx_launch_fifo7_cor_err_cnt),
4808 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4809                         CNTR_NORMAL,
4810                         access_tx_launch_fifo6_cor_err_cnt),
4811 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4812                         CNTR_NORMAL,
4813                         access_tx_launch_fifo5_cor_err_cnt),
4814 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4815                         CNTR_NORMAL,
4816                         access_tx_launch_fifo4_cor_err_cnt),
4817 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4818                         CNTR_NORMAL,
4819                         access_tx_launch_fifo3_cor_err_cnt),
4820 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4821                         CNTR_NORMAL,
4822                         access_tx_launch_fifo2_cor_err_cnt),
4823 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4824                         CNTR_NORMAL,
4825                         access_tx_launch_fifo1_cor_err_cnt),
4826 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4827                         CNTR_NORMAL,
4828                         access_tx_launch_fifo0_cor_err_cnt),
4829 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4830                         CNTR_NORMAL,
4831                         access_tx_credit_return_vl_err_cnt),
4832 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4833                         CNTR_NORMAL,
4834                         access_tx_hcrc_insertion_err_cnt),
4835 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4836                         CNTR_NORMAL,
4837                         access_tx_egress_fifo_unc_err_cnt),
4838 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4839                         CNTR_NORMAL,
4840                         access_tx_read_pio_memory_unc_err_cnt),
4841 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4842                         CNTR_NORMAL,
4843                         access_tx_read_sdma_memory_unc_err_cnt),
4844 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4845                         CNTR_NORMAL,
4846                         access_tx_sb_hdr_unc_err_cnt),
4847 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4848                         CNTR_NORMAL,
4849                         access_tx_credit_return_partiy_err_cnt),
4850 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4851                         0, 0, CNTR_NORMAL,
4852                         access_tx_launch_fifo8_unc_or_parity_err_cnt),
4853 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4854                         0, 0, CNTR_NORMAL,
4855                         access_tx_launch_fifo7_unc_or_parity_err_cnt),
4856 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4857                         0, 0, CNTR_NORMAL,
4858                         access_tx_launch_fifo6_unc_or_parity_err_cnt),
4859 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4860                         0, 0, CNTR_NORMAL,
4861                         access_tx_launch_fifo5_unc_or_parity_err_cnt),
4862 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4863                         0, 0, CNTR_NORMAL,
4864                         access_tx_launch_fifo4_unc_or_parity_err_cnt),
4865 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4866                         0, 0, CNTR_NORMAL,
4867                         access_tx_launch_fifo3_unc_or_parity_err_cnt),
4868 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4869                         0, 0, CNTR_NORMAL,
4870                         access_tx_launch_fifo2_unc_or_parity_err_cnt),
4871 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4872                         0, 0, CNTR_NORMAL,
4873                         access_tx_launch_fifo1_unc_or_parity_err_cnt),
4874 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4875                         0, 0, CNTR_NORMAL,
4876                         access_tx_launch_fifo0_unc_or_parity_err_cnt),
4877 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4878                         0, 0, CNTR_NORMAL,
4879                         access_tx_sdma15_disallowed_packet_err_cnt),
4880 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4881                         0, 0, CNTR_NORMAL,
4882                         access_tx_sdma14_disallowed_packet_err_cnt),
4883 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4884                         0, 0, CNTR_NORMAL,
4885                         access_tx_sdma13_disallowed_packet_err_cnt),
4886 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4887                         0, 0, CNTR_NORMAL,
4888                         access_tx_sdma12_disallowed_packet_err_cnt),
4889 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4890                         0, 0, CNTR_NORMAL,
4891                         access_tx_sdma11_disallowed_packet_err_cnt),
4892 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4893                         0, 0, CNTR_NORMAL,
4894                         access_tx_sdma10_disallowed_packet_err_cnt),
4895 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4896                         0, 0, CNTR_NORMAL,
4897                         access_tx_sdma9_disallowed_packet_err_cnt),
4898 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4899                         0, 0, CNTR_NORMAL,
4900                         access_tx_sdma8_disallowed_packet_err_cnt),
4901 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4902                         0, 0, CNTR_NORMAL,
4903                         access_tx_sdma7_disallowed_packet_err_cnt),
4904 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4905                         0, 0, CNTR_NORMAL,
4906                         access_tx_sdma6_disallowed_packet_err_cnt),
4907 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4908                         0, 0, CNTR_NORMAL,
4909                         access_tx_sdma5_disallowed_packet_err_cnt),
4910 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4911                         0, 0, CNTR_NORMAL,
4912                         access_tx_sdma4_disallowed_packet_err_cnt),
4913 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4914                         0, 0, CNTR_NORMAL,
4915                         access_tx_sdma3_disallowed_packet_err_cnt),
4916 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4917                         0, 0, CNTR_NORMAL,
4918                         access_tx_sdma2_disallowed_packet_err_cnt),
4919 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4920                         0, 0, CNTR_NORMAL,
4921                         access_tx_sdma1_disallowed_packet_err_cnt),
4922 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4923                         0, 0, CNTR_NORMAL,
4924                         access_tx_sdma0_disallowed_packet_err_cnt),
4925 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4926                         CNTR_NORMAL,
4927                         access_tx_config_parity_err_cnt),
4928 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4929                         CNTR_NORMAL,
4930                         access_tx_sbrd_ctl_csr_parity_err_cnt),
4931 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4932                         CNTR_NORMAL,
4933                         access_tx_launch_csr_parity_err_cnt),
4934 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4935                         CNTR_NORMAL,
4936                         access_tx_illegal_vl_err_cnt),
4937 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4938                         "TxSbrdCtlStateMachineParityErr", 0, 0,
4939                         CNTR_NORMAL,
4940                         access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4941 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4942                         CNTR_NORMAL,
4943                         access_egress_reserved_10_err_cnt),
4944 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4945                         CNTR_NORMAL,
4946                         access_egress_reserved_9_err_cnt),
4947 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4948                         0, 0, CNTR_NORMAL,
4949                         access_tx_sdma_launch_intf_parity_err_cnt),
4950 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4951                         CNTR_NORMAL,
4952                         access_tx_pio_launch_intf_parity_err_cnt),
4953 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4954                         CNTR_NORMAL,
4955                         access_egress_reserved_6_err_cnt),
4956 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4957                         CNTR_NORMAL,
4958                         access_tx_incorrect_link_state_err_cnt),
4959 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4960                         CNTR_NORMAL,
4961                         access_tx_linkdown_err_cnt),
4962 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4963                         "EgressFifoUnderrunOrParityErr", 0, 0,
4964                         CNTR_NORMAL,
4965                         access_tx_egress_fifi_underrun_or_parity_err_cnt),
4966 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4967                         CNTR_NORMAL,
4968                         access_egress_reserved_2_err_cnt),
4969 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4970                         CNTR_NORMAL,
4971                         access_tx_pkt_integrity_mem_unc_err_cnt),
4972 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4973                         CNTR_NORMAL,
4974                         access_tx_pkt_integrity_mem_cor_err_cnt),
4975 /* SendErrStatus */
4976 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4977                         CNTR_NORMAL,
4978                         access_send_csr_write_bad_addr_err_cnt),
4979 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4980                         CNTR_NORMAL,
4981                         access_send_csr_read_bad_addr_err_cnt),
4982 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4983                         CNTR_NORMAL,
4984                         access_send_csr_parity_cnt),
4985 /* SendCtxtErrStatus */
4986 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4987                         CNTR_NORMAL,
4988                         access_pio_write_out_of_bounds_err_cnt),
4989 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4990                         CNTR_NORMAL,
4991                         access_pio_write_overflow_err_cnt),
4992 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4993                         0, 0, CNTR_NORMAL,
4994                         access_pio_write_crosses_boundary_err_cnt),
4995 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4996                         CNTR_NORMAL,
4997                         access_pio_disallowed_packet_err_cnt),
4998 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4999                         CNTR_NORMAL,
5000                         access_pio_inconsistent_sop_err_cnt),
5001 /* SendDmaEngErrStatus */
5002 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
5003                         0, 0, CNTR_NORMAL,
5004                         access_sdma_header_request_fifo_cor_err_cnt),
5005 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
5006                         CNTR_NORMAL,
5007                         access_sdma_header_storage_cor_err_cnt),
5008 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
5009                         CNTR_NORMAL,
5010                         access_sdma_packet_tracking_cor_err_cnt),
5011 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
5012                         CNTR_NORMAL,
5013                         access_sdma_assembly_cor_err_cnt),
5014 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
5015                         CNTR_NORMAL,
5016                         access_sdma_desc_table_cor_err_cnt),
5017 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5018                         0, 0, CNTR_NORMAL,
5019                         access_sdma_header_request_fifo_unc_err_cnt),
5020 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5021                         CNTR_NORMAL,
5022                         access_sdma_header_storage_unc_err_cnt),
5023 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5024                         CNTR_NORMAL,
5025                         access_sdma_packet_tracking_unc_err_cnt),
5026 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5027                         CNTR_NORMAL,
5028                         access_sdma_assembly_unc_err_cnt),
5029 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5030                         CNTR_NORMAL,
5031                         access_sdma_desc_table_unc_err_cnt),
5032 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5033                         CNTR_NORMAL,
5034                         access_sdma_timeout_err_cnt),
5035 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5036                         CNTR_NORMAL,
5037                         access_sdma_header_length_err_cnt),
5038 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5039                         CNTR_NORMAL,
5040                         access_sdma_header_address_err_cnt),
5041 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5042                         CNTR_NORMAL,
5043                         access_sdma_header_select_err_cnt),
5044 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5045                         CNTR_NORMAL,
5046                         access_sdma_reserved_9_err_cnt),
5047 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5048                         CNTR_NORMAL,
5049                         access_sdma_packet_desc_overflow_err_cnt),
5050 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5051                         CNTR_NORMAL,
5052                         access_sdma_length_mismatch_err_cnt),
5053 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5054                         CNTR_NORMAL,
5055                         access_sdma_halt_err_cnt),
5056 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5057                         CNTR_NORMAL,
5058                         access_sdma_mem_read_err_cnt),
5059 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5060                         CNTR_NORMAL,
5061                         access_sdma_first_desc_err_cnt),
5062 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5063                         CNTR_NORMAL,
5064                         access_sdma_tail_out_of_bounds_err_cnt),
5065 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5066                         CNTR_NORMAL,
5067                         access_sdma_too_long_err_cnt),
5068 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5069                         CNTR_NORMAL,
5070                         access_sdma_gen_mismatch_err_cnt),
5071 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5072                         CNTR_NORMAL,
5073                         access_sdma_wrong_dw_err_cnt),
5074 };
5075
5076 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5077 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5078                         CNTR_NORMAL),
5079 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5080                         CNTR_NORMAL),
5081 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5082                         CNTR_NORMAL),
5083 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5084                         CNTR_NORMAL),
5085 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5086                         CNTR_NORMAL),
5087 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5088                         CNTR_NORMAL),
5089 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5090                         CNTR_NORMAL),
5091 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5092 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5093 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5094 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5095                                       CNTR_SYNTH | CNTR_VL),
5096 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5097                                      CNTR_SYNTH | CNTR_VL),
5098 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5099                                       CNTR_SYNTH | CNTR_VL),
5100 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5101 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5102 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5103                              access_sw_link_dn_cnt),
5104 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5105                            access_sw_link_up_cnt),
5106 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5107                                  access_sw_unknown_frame_cnt),
5108 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5109                              access_sw_xmit_discards),
5110 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5111                                 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5112                                 access_sw_xmit_discards),
5113 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5114                                  access_xmit_constraint_errs),
5115 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5116                                 access_rcv_constraint_errs),
5117 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5118 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5119 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5120 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5121 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5122 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5123 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5124 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5125 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5126 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5127 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5128 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5129 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5130                                access_sw_cpu_rc_acks),
5131 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5132                                 access_sw_cpu_rc_qacks),
5133 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5134                                        access_sw_cpu_rc_delayed_comp),
5135 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5136 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5137 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5138 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5139 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5140 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5141 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5142 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5143 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5144 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5145 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5146 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5147 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5148 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5149 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5150 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5151 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5152 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5153 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5154 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5155 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5156 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5157 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5158 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5159 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5160 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5161 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5162 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5163 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5164 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5165 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5166 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5167 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5168 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5169 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5170 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5171 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5172 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5173 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5174 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5175 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5176 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5177 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5178 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5179 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5180 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5181 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5182 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5183 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5184 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5185 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5186 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5187 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5188 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5189 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5190 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5191 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5192 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5193 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5194 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5195 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5196 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5197 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5198 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5199 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5200 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5201 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5202 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5203 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5204 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5205 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5206 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5207 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5208 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5209 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5210 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5211 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5212 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5213 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5214 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5215 };
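/*
 * A minimal, illustrative sketch (not part of this driver) of the
 * designated-initializer pattern used by the cntrs[] and port_cntrs[]
 * tables above.  DEMO_LBL/DEMO_ELM are hypothetical stand-ins for
 * OVR_LBL/OVR_ELM; the real macros also encode CSR offsets, flags and
 * access callbacks.  Compiles as ordinary userspace C.
 */
#include <stdio.h>

#define DEMO_FIRST_OVFL 3			/* index of the first overflow slot */
#define DEMO_LBL(n) (DEMO_FIRST_OVFL + (n))	/* table index for context n */
#define DEMO_ELM(n) { "RcvCtxt" #n "Ovfl" }	/* entry named after context n */

struct demo_cntr {
	const char *name;
};

static struct demo_cntr demo_cntrs[] = {
	[0] = { "TxPkt" },
	[1] = { "TxWords" },
	[2] = { "RxPkt" },
	[DEMO_LBL(0)] = DEMO_ELM(0), [DEMO_LBL(1)] = DEMO_ELM(1),
	[DEMO_LBL(2)] = DEMO_ELM(2), [DEMO_LBL(3)] = DEMO_ELM(3),
};

int main(void)
{
	/* prints "RcvCtxt2Ovfl": the macro pastes the index into the name */
	printf("%s\n", demo_cntrs[DEMO_LBL(2)].name);
	return 0;
}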
5216
5217 /* ======================================================================== */
5218
5219 /* return true if this is chip revision A */
5220 int is_ax(struct hfi1_devdata *dd)
5221 {
5222         u8 chip_rev_minor =
5223                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5224                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5225         return (chip_rev_minor & 0xf0) == 0;
5226 }
5227
5228 /* return true if this is chip revision B */
5229 int is_bx(struct hfi1_devdata *dd)
5230 {
5231         u8 chip_rev_minor =
5232                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5233                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5234         return (chip_rev_minor & 0xF0) == 0x10;
5235 }
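/*
 * A minimal sketch (not driver code) of how the minor chip revision field
 * maps to the A and B steppings tested by is_ax()/is_bx() above.  The shift
 * and mask values below are placeholders, not the real CCE_REVISION
 * constants.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_REV_MINOR_SHIFT 8		/* hypothetical field position */
#define DEMO_REV_MINOR_MASK 0xffull	/* hypothetical field width */

static int demo_is_ax(uint64_t revision)
{
	uint8_t minor = (revision >> DEMO_REV_MINOR_SHIFT) & DEMO_REV_MINOR_MASK;

	/* A-step parts have the high nibble of the minor revision clear */
	return (minor & 0xf0) == 0;
}

int main(void)
{
	/* minor 0x05 -> A step, minor 0x15 -> B step */
	printf("0x05 is_ax=%d\n", demo_is_ax(0x05ull << DEMO_REV_MINOR_SHIFT));
	printf("0x15 is_ax=%d\n", demo_is_ax(0x15ull << DEMO_REV_MINOR_SHIFT));
	return 0;
}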
5236
5237 /*
5238  * Append string s to buffer buf.  Arguments curp and lenp are the current
5239  * position and remaining length, respectively.
5240  *
5241  * return 0 on success, 1 on out of room
5242  */
5243 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5244 {
5245         char *p = *curp;
5246         int len = *lenp;
5247         int result = 0; /* success */
5248         char c;
5249
5250         /* add a comma if this is not the first item in the buffer */
5251         if (p != buf) {
5252                 if (len == 0) {
5253                         result = 1; /* out of room */
5254                         goto done;
5255                 }
5256                 *p++ = ',';
5257                 len--;
5258         }
5259
5260         /* copy the string */
5261         while ((c = *s++) != 0) {
5262                 if (len == 0) {
5263                         result = 1; /* out of room */
5264                         goto done;
5265                 }
5266                 *p++ = c;
5267                 len--;
5268         }
5269
5270 done:
5271         /* write return values */
5272         *curp = p;
5273         *lenp = len;
5274
5275         return result;
5276 }
5277
5278 /*
5279  * Using the given flag table, print a comma separated string into
5280  * the buffer.  End in '*' if the buffer is too short.
5281  */
5282 static char *flag_string(char *buf, int buf_len, u64 flags,
5283                          struct flag_table *table, int table_size)
5284 {
5285         char extra[32];
5286         char *p = buf;
5287         int len = buf_len;
5288         int no_room = 0;
5289         /* make sure there are at least 2 bytes so we can form "*" plus a nul */
5290
5291         /* make sure there is at least 2 so we can form "*" */
5292         if (len < 2)
5293                 return "";
5294
5295         len--;  /* leave room for a nul */
5296         for (i = 0; i < table_size; i++) {
5297                 if (flags & table[i].flag) {
5298                         no_room = append_str(buf, &p, &len, table[i].str);
5299                         if (no_room)
5300                                 break;
5301                         flags &= ~table[i].flag;
5302                 }
5303         }
5304
5305         /* any undocumented bits left? */
5306         if (!no_room && flags) {
5307                 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5308                 no_room = append_str(buf, &p, &len, extra);
5309         }
5310
5311         /* add * if ran out of room */
5312         if (no_room) {
5313                 /* may need to back up to add space for a '*' */
5314                 if (len == 0)
5315                         --p;
5316                 *p++ = '*';
5317         }
5318
5319         /* add final nul - space already allocated above */
5320         *p = 0;
5321         return buf;
5322 }
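/*
 * A minimal sketch (not driver code) of what flag_string() produces.  The
 * demo_flag struct is a simplified stand-in for the driver's flag_table, and
 * the helper below only restates the comma-joining contract; truncation and
 * the trailing '*' are handled by the real routine above.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_flag {
	uint64_t flag;
	const char *str;
};

static const struct demo_flag demo_flags[] = {
	{ 1ull << 0, "CsrParityErr" },
	{ 1ull << 1, "FifoUnderrun" },
	{ 1ull << 5, "Timeout" },
};

static void demo_flag_string(char *buf, size_t len, uint64_t flags)
{
	size_t used = 0;
	size_t i;

	buf[0] = '\0';
	for (i = 0; i < sizeof(demo_flags) / sizeof(demo_flags[0]); i++) {
		if (!(flags & demo_flags[i].flag))
			continue;
		used += snprintf(buf + used, len - used, "%s%s",
				 used ? "," : "", demo_flags[i].str);
		if (used >= len)
			break;	/* the real code appends '*' when out of room */
	}
}

int main(void)
{
	char buf[64];

	demo_flag_string(buf, sizeof(buf), (1ull << 0) | (1ull << 5));
	printf("%s\n", buf);	/* prints "CsrParityErr,Timeout" */
	return 0;
}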
5323
5324 /* first 8 CCE error interrupt source names */
5325 static const char * const cce_misc_names[] = {
5326         "CceErrInt",            /* 0 */
5327         "RxeErrInt",            /* 1 */
5328         "MiscErrInt",           /* 2 */
5329         "Reserved3",            /* 3 */
5330         "PioErrInt",            /* 4 */
5331         "SDmaErrInt",           /* 5 */
5332         "EgressErrInt",         /* 6 */
5333         "TxeErrInt"             /* 7 */
5334 };
5335
5336 /*
5337  * Return the miscellaneous error interrupt name.
5338  */
5339 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5340 {
5341         if (source < ARRAY_SIZE(cce_misc_names))
5342                 strncpy(buf, cce_misc_names[source], bsize);
5343         else
5344                 snprintf(buf, bsize, "Reserved%u",
5345                          source + IS_GENERAL_ERR_START);
5346
5347         return buf;
5348 }
5349
5350 /*
5351  * Return the SDMA engine error interrupt name.
5352  */
5353 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5354 {
5355         snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5356         return buf;
5357 }
5358
5359 /*
5360  * Return the send context error interrupt name.
5361  */
5362 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5363 {
5364         snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5365         return buf;
5366 }
5367
5368 static const char * const various_names[] = {
5369         "PbcInt",
5370         "GpioAssertInt",
5371         "Qsfp1Int",
5372         "Qsfp2Int",
5373         "TCritInt"
5374 };
5375
5376 /*
5377  * Return the various interrupt name.
5378  */
5379 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5380 {
5381         if (source < ARRAY_SIZE(various_names))
5382                 strncpy(buf, various_names[source], bsize);
5383         else
5384                 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5385         return buf;
5386 }
5387
5388 /*
5389  * Return the DC interrupt name.
5390  */
5391 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5392 {
5393         static const char * const dc_int_names[] = {
5394                 "common",
5395                 "lcb",
5396                 "8051",
5397                 "lbm"   /* local block merge */
5398         };
5399
5400         if (source < ARRAY_SIZE(dc_int_names))
5401                 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5402         else
5403                 snprintf(buf, bsize, "DCInt%u", source);
5404         return buf;
5405 }
5406
5407 static const char * const sdma_int_names[] = {
5408         "SDmaInt",
5409         "SdmaIdleInt",
5410         "SdmaProgressInt",
5411 };
5412
5413 /*
5414  * Return the SDMA engine interrupt name.
5415  */
5416 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5417 {
5418         /* what interrupt */
5419         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5420         /* which engine */
5421         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5422
5423         if (likely(what < 3))
5424                 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5425         else
5426                 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5427         return buf;
5428 }
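/*
 * A minimal sketch (not driver code) of the div/mod decode used above.
 * With 16 SDMA engines the source number packs "which interrupt" and
 * "which engine" together: 0-15 are SDmaInt, 16-31 SdmaIdleInt and
 * 32-47 SdmaProgressInt.
 */
#include <stdio.h>

#define DEMO_NUM_SDMA_ENGINES 16

int main(void)
{
	unsigned int source = 35;				/* example source number */
	unsigned int what = source / DEMO_NUM_SDMA_ENGINES;	/* 2 -> progress */
	unsigned int which = source % DEMO_NUM_SDMA_ENGINES;	/* engine 3 */

	printf("what=%u which=%u\n", what, which);
	return 0;
}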
5429
5430 /*
5431  * Return the receive available interrupt name.
5432  */
5433 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5434 {
5435         snprintf(buf, bsize, "RcvAvailInt%u", source);
5436         return buf;
5437 }
5438
5439 /*
5440  * Return the receive urgent interrupt name.
5441  */
5442 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5443 {
5444         snprintf(buf, bsize, "RcvUrgentInt%u", source);
5445         return buf;
5446 }
5447
5448 /*
5449  * Return the send credit interrupt name.
5450  */
5451 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5452 {
5453         snprintf(buf, bsize, "SendCreditInt%u", source);
5454         return buf;
5455 }
5456
5457 /*
5458  * Return the reserved interrupt name.
5459  */
5460 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5461 {
5462         snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5463         return buf;
5464 }
5465
5466 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5467 {
5468         return flag_string(buf, buf_len, flags,
5469                            cce_err_status_flags,
5470                            ARRAY_SIZE(cce_err_status_flags));
5471 }
5472
5473 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5474 {
5475         return flag_string(buf, buf_len, flags,
5476                            rxe_err_status_flags,
5477                            ARRAY_SIZE(rxe_err_status_flags));
5478 }
5479
5480 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5481 {
5482         return flag_string(buf, buf_len, flags, misc_err_status_flags,
5483                            ARRAY_SIZE(misc_err_status_flags));
5484 }
5485
5486 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5487 {
5488         return flag_string(buf, buf_len, flags,
5489                            pio_err_status_flags,
5490                            ARRAY_SIZE(pio_err_status_flags));
5491 }
5492
5493 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5494 {
5495         return flag_string(buf, buf_len, flags,
5496                            sdma_err_status_flags,
5497                            ARRAY_SIZE(sdma_err_status_flags));
5498 }
5499
5500 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5501 {
5502         return flag_string(buf, buf_len, flags,
5503                            egress_err_status_flags,
5504                            ARRAY_SIZE(egress_err_status_flags));
5505 }
5506
5507 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5508 {
5509         return flag_string(buf, buf_len, flags,
5510                            egress_err_info_flags,
5511                            ARRAY_SIZE(egress_err_info_flags));
5512 }
5513
5514 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5515 {
5516         return flag_string(buf, buf_len, flags,
5517                            send_err_status_flags,
5518                            ARRAY_SIZE(send_err_status_flags));
5519 }
5520
5521 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5522 {
5523         char buf[96];
5524         int i = 0;
5525
5526         /*
5527          * For most of these errors, there is nothing that can be done except
5528          * report or record it.
5529          */
5530         dd_dev_info(dd, "CCE Error: %s\n",
5531                     cce_err_status_string(buf, sizeof(buf), reg));
5532
5533         if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5534             is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5535                 /* this error requires a manual drop into SPC freeze mode */
5536                 /* then a fix up */
5537                 start_freeze_handling(dd->pport, FREEZE_SELF);
5538         }
5539
5540         for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5541                 if (reg & (1ull << i)) {
5542                         incr_cntr64(&dd->cce_err_status_cnt[i]);
5543                         /* maintain a counter over all cce_err_status errors */
5544                         incr_cntr64(&dd->sw_cce_err_status_aggregate);
5545                 }
5546         }
5547 }
5548
5549 /*
5550  * Check counters for receive errors that do not have an interrupt
5551  * associated with them.
5552  */
5553 #define RCVERR_CHECK_TIME 10
5554 static void update_rcverr_timer(struct timer_list *t)
5555 {
5556         struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5557         struct hfi1_pportdata *ppd = dd->pport;
5558         u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5559
5560         if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5561             ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5562                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5563                 set_link_down_reason(
5564                 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5565                 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5566                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5567         }
5568         dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5569
5570         mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5571 }
5572
5573 static int init_rcverr(struct hfi1_devdata *dd)
5574 {
5575         timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5576         /* Assume the hardware counter has been reset */
5577         dd->rcv_ovfl_cnt = 0;
5578         return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5579 }
5580
5581 static void free_rcverr(struct hfi1_devdata *dd)
5582 {
5583         if (dd->rcverr_timer.function)
5584                 del_timer_sync(&dd->rcverr_timer);
5585 }
5586
5587 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5588 {
5589         char buf[96];
5590         int i = 0;
5591
5592         dd_dev_info(dd, "Receive Error: %s\n",
5593                     rxe_err_status_string(buf, sizeof(buf), reg));
5594
5595         if (reg & ALL_RXE_FREEZE_ERR) {
5596                 int flags = 0;
5597
5598                 /*
5599                  * Freeze mode recovery is disabled for the errors
5600                  * in RXE_FREEZE_ABORT_MASK
5601                  */
5602                 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5603                         flags = FREEZE_ABORT;
5604
5605                 start_freeze_handling(dd->pport, flags);
5606         }
5607
5608         for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5609                 if (reg & (1ull << i))
5610                         incr_cntr64(&dd->rcv_err_status_cnt[i]);
5611         }
5612 }
5613
5614 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5615 {
5616         char buf[96];
5617         int i = 0;
5618
5619         dd_dev_info(dd, "Misc Error: %s\n",
5620                     misc_err_status_string(buf, sizeof(buf), reg));
5621         for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5622                 if (reg & (1ull << i))
5623                         incr_cntr64(&dd->misc_err_status_cnt[i]);
5624         }
5625 }
5626
5627 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5628 {
5629         char buf[96];
5630         int i = 0;
5631
5632         dd_dev_info(dd, "PIO Error: %s\n",
5633                     pio_err_status_string(buf, sizeof(buf), reg));
5634
5635         if (reg & ALL_PIO_FREEZE_ERR)
5636                 start_freeze_handling(dd->pport, 0);
5637
5638         for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5639                 if (reg & (1ull << i))
5640                         incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5641         }
5642 }
5643
5644 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5645 {
5646         char buf[96];
5647         int i = 0;
5648
5649         dd_dev_info(dd, "SDMA Error: %s\n",
5650                     sdma_err_status_string(buf, sizeof(buf), reg));
5651
5652         if (reg & ALL_SDMA_FREEZE_ERR)
5653                 start_freeze_handling(dd->pport, 0);
5654
5655         for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5656                 if (reg & (1ull << i))
5657                         incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5658         }
5659 }
5660
5661 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5662 {
5663         incr_cntr64(&ppd->port_xmit_discards);
5664 }
5665
5666 static void count_port_inactive(struct hfi1_devdata *dd)
5667 {
5668         __count_port_discards(dd->pport);
5669 }
5670
5671 /*
5672  * We have had a "disallowed packet" error during egress. Determine the
5673  * integrity check which failed, and update relevant error counter, etc.
5674  *
5675  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5676  * bit of state per integrity check, and so we can miss the reason for an
5677  * egress error if more than one packet fails the same integrity check
5678  * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5679  */
5680 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5681                                         int vl)
5682 {
5683         struct hfi1_pportdata *ppd = dd->pport;
5684         u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5685         u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5686         char buf[96];
5687
5688         /* clear down all observed info as quickly as possible after read */
5689         write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5690
5691         dd_dev_info(dd,
5692                     "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5693                     info, egress_err_info_string(buf, sizeof(buf), info), src);
5694
5695         /* Eventually add other counters for each bit */
5696         if (info & PORT_DISCARD_EGRESS_ERRS) {
5697                 int weight, i;
5698
5699                 /*
5700                  * Count all applicable bits as individual errors and
5701                  * attribute them to the packet that triggered this handler.
5702                  * This may not be completely accurate due to limitations
5703                  * on the available hardware error information.  There is
5704                  * a single information register and any number of error
5705                  * packets may have occurred and contributed to it before
5706                  * this routine is called.  This means that:
5707                  * a) If multiple packets with the same error occur before
5708                  *    this routine is called, earlier packets are missed.
5709                  *    There is only a single bit for each error type.
5710                  * b) Errors may not be attributed to the correct VL.
5711                  *    The driver is attributing all bits in the info register
5712                  *    to the packet that triggered this call, but bits
5713                  *    could be an accumulation of different packets with
5714                  *    different VLs.
5715                  * c) A single error packet may have multiple counts attached
5716                  *    to it.  There is no way for the driver to know if
5717                  *    multiple bits set in the info register are due to a
5718                  *    single packet or multiple packets.  The driver assumes
5719                  *    multiple packets.
5720                  */
5721                 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5722                 for (i = 0; i < weight; i++) {
5723                         __count_port_discards(ppd);
5724                         if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5725                                 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5726                         else if (vl == 15)
5727                                 incr_cntr64(&ppd->port_xmit_discards_vl
5728                                             [C_VL_15]);
5729                 }
5730         }
5731 }
5732
5733 /*
5734  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5735  * register. Does it represent a 'port inactive' error?
5736  */
5737 static inline int port_inactive_err(u64 posn)
5738 {
5739         return (posn >= SEES(TX_LINKDOWN) &&
5740                 posn <= SEES(TX_INCORRECT_LINK_STATE));
5741 }
5742
5743 /*
5744  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5745  * register. Does it represent a 'disallowed packet' error?
5746  */
5747 static inline int disallowed_pkt_err(int posn)
5748 {
5749         return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5750                 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5751 }
5752
5753 /*
5754  * Input value is a bit position of one of the SDMA engine disallowed
5755  * packet errors.  Return which engine.  Use of this must be guarded by
5756  * disallowed_pkt_err().
5757  */
5758 static inline int disallowed_pkt_engine(int posn)
5759 {
5760         return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5761 }
5762
5763 /*
5764  * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5765  * be done.
5766  */
5767 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5768 {
5769         struct sdma_vl_map *m;
5770         int vl;
5771
5772         /* range check */
5773         if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5774                 return -1;
5775
5776         rcu_read_lock();
5777         m = rcu_dereference(dd->sdma_map);
5778         vl = m->engine_to_vl[engine];
5779         rcu_read_unlock();
5780
5781         return vl;
5782 }
5783
5784 /*
5785  * Translate the send context (software index) into a VL.  Return -1 if the
5786  * translation cannot be done.
5787  */
5788 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5789 {
5790         struct send_context_info *sci;
5791         struct send_context *sc;
5792         int i;
5793
5794         sci = &dd->send_contexts[sw_index];
5795
5796         /* there is no information for user (PSM) and ack contexts */
5797         if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5798                 return -1;
5799
5800         sc = sci->sc;
5801         if (!sc)
5802                 return -1;
5803         if (dd->vld[15].sc == sc)
5804                 return 15;
5805         for (i = 0; i < num_vls; i++)
5806                 if (dd->vld[i].sc == sc)
5807                         return i;
5808
5809         return -1;
5810 }
5811
5812 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5813 {
5814         u64 reg_copy = reg, handled = 0;
5815         char buf[96];
5816         int i = 0;
5817
5818         if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5819                 start_freeze_handling(dd->pport, 0);
5820         else if (is_ax(dd) &&
5821                  (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5822                  (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5823                 start_freeze_handling(dd->pport, 0);
5824
5825         while (reg_copy) {
5826                 int posn = fls64(reg_copy);
5827                 /* fls64() returns a 1-based offset, we want it zero based */
5828                 int shift = posn - 1;
5829                 u64 mask = 1ULL << shift;
5830
5831                 if (port_inactive_err(shift)) {
5832                         count_port_inactive(dd);
5833                         handled |= mask;
5834                 } else if (disallowed_pkt_err(shift)) {
5835                         int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5836
5837                         handle_send_egress_err_info(dd, vl);
5838                         handled |= mask;
5839                 }
5840                 reg_copy &= ~mask;
5841         }
5842
5843         reg &= ~handled;
5844
5845         if (reg)
5846                 dd_dev_info(dd, "Egress Error: %s\n",
5847                             egress_err_status_string(buf, sizeof(buf), reg));
5848
5849         for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5850                 if (reg & (1ull << i))
5851                         incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5852         }
5853 }
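/*
 * A minimal sketch (not driver code) of the bit walk used above: visit the
 * set bits of an error status word from the most significant down, clearing
 * each as it is handled.  demo_fls64() is a portable stand-in for fls64(),
 * which returns a 1-based bit position and 0 for a zero argument.
 */
#include <stdint.h>
#include <stdio.h>

static int demo_fls64(uint64_t v)
{
	int pos = 0;

	while (v) {
		pos++;
		v >>= 1;
	}
	return pos;
}

int main(void)
{
	uint64_t reg = (1ull << 40) | (1ull << 3) | 1ull;

	while (reg) {
		int shift = demo_fls64(reg) - 1;	/* convert to 0-based */
		uint64_t mask = 1ull << shift;

		printf("handling bit %d\n", shift);	/* 40, then 3, then 0 */
		reg &= ~mask;
	}
	return 0;
}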
5854
5855 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5856 {
5857         char buf[96];
5858         int i = 0;
5859
5860         dd_dev_info(dd, "Send Error: %s\n",
5861                     send_err_status_string(buf, sizeof(buf), reg));
5862
5863         for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5864                 if (reg & (1ull << i))
5865                         incr_cntr64(&dd->send_err_status_cnt[i]);
5866         }
5867 }
5868
5869 /*
5870  * The maximum number of times the error clear down will loop before
5871  * blocking a repeating error.  This value is arbitrary.
5872  */
5873 #define MAX_CLEAR_COUNT 20
5874
5875 /*
5876  * Clear and handle an error register.  All error interrupts are funneled
5877  * through here to have a central location to correctly handle single-
5878  * or multi-shot errors.
5879  *
5880  * For non per-context registers, call this routine with a context value
5881  * of 0 so the per-context offset is zero.
5882  *
5883  * If the handler loops too many times, assume that something is wrong
5884  * and can't be fixed, so mask the error bits.
5885  */
5886 static void interrupt_clear_down(struct hfi1_devdata *dd,
5887                                  u32 context,
5888                                  const struct err_reg_info *eri)
5889 {
5890         u64 reg;
5891         u32 count;
5892
5893         /* read in a loop until no more errors are seen */
5894         count = 0;
5895         while (1) {
5896                 reg = read_kctxt_csr(dd, context, eri->status);
5897                 if (reg == 0)
5898                         break;
5899                 write_kctxt_csr(dd, context, eri->clear, reg);
5900                 if (likely(eri->handler))
5901                         eri->handler(dd, context, reg);
5902                 count++;
5903                 if (count > MAX_CLEAR_COUNT) {
5904                         u64 mask;
5905
5906                         dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5907                                    eri->desc, reg);
5908                         /*
5909                          * Read-modify-write so any other masked bits
5910                          * remain masked.
5911                          */
5912                         mask = read_kctxt_csr(dd, context, eri->mask);
5913                         mask &= ~reg;
5914                         write_kctxt_csr(dd, context, eri->mask, mask);
5915                         break;
5916                 }
5917         }
5918 }
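/*
 * A minimal userspace sketch (not driver code) of the clear-down loop above.
 * demo_read_status()/demo_clear_status() stand in for the CSR accessors, and
 * a sticky error bit models hardware that keeps re-asserting, which is what
 * eventually trips the masking path.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_CLEAR_COUNT 20

static uint64_t demo_status = 0x4;	/* error bit that never clears */
static uint64_t demo_mask = ~0ull;	/* all error interrupts enabled */

static uint64_t demo_read_status(void)
{
	return demo_status & demo_mask;
}

static void demo_clear_status(uint64_t v)
{
	(void)v;	/* sticky error: the write has no effect */
}

int main(void)
{
	unsigned int count = 0;
	uint64_t reg;

	while ((reg = demo_read_status()) != 0) {
		demo_clear_status(reg);
		count++;	/* the real code calls eri->handler(dd, context, reg) here */
		if (count > DEMO_MAX_CLEAR_COUNT) {
			demo_mask &= ~reg;	/* read-modify-write style masking */
			printf("masking repeating bits 0x%llx after %u passes\n",
			       (unsigned long long)reg, count);
			break;
		}
	}
	return 0;
}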
5919
5920 /*
5921  * CCE block "misc" interrupt.  Source is < 16.
5922  */
5923 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5924 {
5925         const struct err_reg_info *eri = &misc_errs[source];
5926
5927         if (eri->handler) {
5928                 interrupt_clear_down(dd, 0, eri);
5929         } else {
5930                 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5931                            source);
5932         }
5933 }
5934
5935 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5936 {
5937         return flag_string(buf, buf_len, flags,
5938                            sc_err_status_flags,
5939                            ARRAY_SIZE(sc_err_status_flags));
5940 }
5941
5942 /*
5943  * Send context error interrupt.  Source (hw_context) is < 160.
5944  *
5945  * All send context errors cause the send context to halt.  The normal
5946  * clear-down mechanism cannot be used because we cannot clear the
5947  * error bits until several other long-running items are done first.
5948  * This is OK because with the context halted, nothing else is going
5949  * to happen on it anyway.
5950  */
5951 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5952                                 unsigned int hw_context)
5953 {
5954         struct send_context_info *sci;
5955         struct send_context *sc;
5956         char flags[96];
5957         u64 status;
5958         u32 sw_index;
5959         int i = 0;
5960         unsigned long irq_flags;
5961
5962         sw_index = dd->hw_to_sw[hw_context];
5963         if (sw_index >= dd->num_send_contexts) {
5964                 dd_dev_err(dd,
5965                            "out of range sw index %u for send context %u\n",
5966                            sw_index, hw_context);
5967                 return;
5968         }
5969         sci = &dd->send_contexts[sw_index];
5970         spin_lock_irqsave(&dd->sc_lock, irq_flags);
5971         sc = sci->sc;
5972         if (!sc) {
5973                 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5974                            sw_index, hw_context);
5975                 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5976                 return;
5977         }
5978
5979         /* tell the software that a halt has begun */
5980         sc_stop(sc, SCF_HALTED);
5981
5982         status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5983
5984         dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5985                     send_context_err_status_string(flags, sizeof(flags),
5986                                                    status));
5987
5988         if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5989                 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5990
5991         /*
5992          * Automatically restart halted kernel contexts out of interrupt
5993          * context.  User contexts must ask the driver to restart the context.
5994          */
5995         if (sc->type != SC_USER)
5996                 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5997         spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5998
5999         /*
6000          * Update the counters for the corresponding status bits.
6001          * Note that these particular counters are aggregated over all
6002          * 160 contexts.
6003          */
6004         for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
6005                 if (status & (1ull << i))
6006                         incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6007         }
6008 }
6009
6010 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6011                                 unsigned int source, u64 status)
6012 {
6013         struct sdma_engine *sde;
6014         int i = 0;
6015
6016         sde = &dd->per_sdma[source];
6017 #ifdef CONFIG_SDMA_VERBOSITY
6018         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6019                    slashstrip(__FILE__), __LINE__, __func__);
6020         dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6021                    sde->this_idx, source, (unsigned long long)status);
6022 #endif
6023         sde->err_cnt++;
6024         sdma_engine_error(sde, status);
6025
6026         /*
6027          * Update the counters for the corresponding status bits.
6028          * Note that these particular counters are aggregated over
6029          * all 16 DMA engines.
6030          */
6031         for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6032                 if (status & (1ull << i))
6033                         incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6034         }
6035 }
6036
6037 /*
6038  * CCE block SDMA error interrupt.  Source is < 16.
6039  */
6040 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6041 {
6042 #ifdef CONFIG_SDMA_VERBOSITY
6043         struct sdma_engine *sde = &dd->per_sdma[source];
6044
6045         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6046                    slashstrip(__FILE__), __LINE__, __func__);
6047         dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6048                    source);
6049         sdma_dumpstate(sde);
6050 #endif
6051         interrupt_clear_down(dd, source, &sdma_eng_err);
6052 }
6053
6054 /*
6055  * CCE block "various" interrupt.  Source is < 8.
6056  */
6057 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6058 {
6059         const struct err_reg_info *eri = &various_err[source];
6060
6061         /*
6062          * TCritInt cannot go through interrupt_clear_down()
6063          * because it is not a second tier interrupt. The handler
6064          * should be called directly.
6065          */
6066         if (source == TCRIT_INT_SOURCE)
6067                 handle_temp_err(dd);
6068         else if (eri->handler)
6069                 interrupt_clear_down(dd, 0, eri);
6070         else
6071                 dd_dev_info(dd,
6072                             "%s: Unimplemented/reserved interrupt %d\n",
6073                             __func__, source);
6074 }
6075
6076 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6077 {
6078         /* src_ctx is always zero */
6079         struct hfi1_pportdata *ppd = dd->pport;
6080         unsigned long flags;
6081         u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6082
6083         if (reg & QSFP_HFI0_MODPRST_N) {
6084                 if (!qsfp_mod_present(ppd)) {
6085                         dd_dev_info(dd, "%s: QSFP module removed\n",
6086                                     __func__);
6087
6088                         ppd->driver_link_ready = 0;
6089                         /*
6090                          * Cable removed, reset all our information about the
6091                          * cache and cable capabilities
6092                          */
6093
6094                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6095                         /*
6096                          * We don't set cache_refresh_required here as we expect
6097                          * an interrupt when a cable is inserted
6098                          */
6099                         ppd->qsfp_info.cache_valid = 0;
6100                         ppd->qsfp_info.reset_needed = 0;
6101                         ppd->qsfp_info.limiting_active = 0;
6102                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6103                                                flags);
6104                         /* Invert the ModPresent pin now to detect plug-in */
6105                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6106                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6107
6108                         if ((ppd->offline_disabled_reason >
6109                           HFI1_ODR_MASK(
6110                           OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6111                           (ppd->offline_disabled_reason ==
6112                           HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6113                                 ppd->offline_disabled_reason =
6114                                 HFI1_ODR_MASK(
6115                                 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6116
6117                         if (ppd->host_link_state == HLS_DN_POLL) {
6118                                 /*
6119                                  * The link is still in POLL. This means
6120                                  * that the normal link down processing
6121                                  * will not happen. We have to do it here
6122                                  * before turning the DC off.
6123                                  */
6124                                 queue_work(ppd->link_wq, &ppd->link_down_work);
6125                         }
6126                 } else {
6127                         dd_dev_info(dd, "%s: QSFP module inserted\n",
6128                                     __func__);
6129
6130                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6131                         ppd->qsfp_info.cache_valid = 0;
6132                         ppd->qsfp_info.cache_refresh_required = 1;
6133                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6134                                                flags);
6135
6136                         /*
6137                          * Stop inversion of ModPresent pin to detect
6138                          * removal of the cable
6139                          */
6140                         qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6141                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6142                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6143
6144                         ppd->offline_disabled_reason =
6145                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6146                 }
6147         }
6148
6149         if (reg & QSFP_HFI0_INT_N) {
6150                 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6151                             __func__);
6152                 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6153                 ppd->qsfp_info.check_interrupt_flags = 1;
6154                 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6155         }
6156
6157         /* Schedule the QSFP work only if there is a cable attached. */
6158         if (qsfp_mod_present(ppd))
6159                 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6160 }
6161
6162 static int request_host_lcb_access(struct hfi1_devdata *dd)
6163 {
6164         int ret;
6165
6166         ret = do_8051_command(dd, HCMD_MISC,
6167                               (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6168                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6169         if (ret != HCMD_SUCCESS) {
6170                 dd_dev_err(dd, "%s: command failed with error %d\n",
6171                            __func__, ret);
6172         }
6173         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6174 }
6175
6176 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6177 {
6178         int ret;
6179
6180         ret = do_8051_command(dd, HCMD_MISC,
6181                               (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6182                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6183         if (ret != HCMD_SUCCESS) {
6184                 dd_dev_err(dd, "%s: command failed with error %d\n",
6185                            __func__, ret);
6186         }
6187         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6188 }
6189
6190 /*
6191  * Set the LCB selector - allow host access.  The DCC selector always
6192  * points to the host.
6193  */
6194 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6195 {
6196         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6197                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6198                   DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6199 }
6200
6201 /*
6202  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6203  * points to the host.
6204  */
6205 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6206 {
6207         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6208                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6209 }
6210
6211 /*
6212  * Acquire LCB access from the 8051.  If the host already has access,
6213  * just increment a counter.  Otherwise, inform the 8051 that the
6214  * host is taking access.
6215  *
6216  * Returns:
6217  *      0 on success
6218  *      -EBUSY if the 8051 has control and cannot be disturbed
6219  *      -errno if unable to acquire access from the 8051
6220  */
6221 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6222 {
6223         struct hfi1_pportdata *ppd = dd->pport;
6224         int ret = 0;
6225
6226         /*
6227          * Use the host link state lock so the operation of this routine
6228          * { link state check, selector change, count increment } can occur
6229          * as a unit against a link state change.  Otherwise there is a
6230          * race between the state change and the count increment.
6231          */
6232         if (sleep_ok) {
6233                 mutex_lock(&ppd->hls_lock);
6234         } else {
6235                 while (!mutex_trylock(&ppd->hls_lock))
6236                         udelay(1);
6237         }
6238
6239         /* this access is valid only when the link is up */
6240         if (ppd->host_link_state & HLS_DOWN) {
6241                 dd_dev_info(dd, "%s: link state %s not up\n",
6242                             __func__, link_state_name(ppd->host_link_state));
6243                 ret = -EBUSY;
6244                 goto done;
6245         }
6246
6247         if (dd->lcb_access_count == 0) {
6248                 ret = request_host_lcb_access(dd);
6249                 if (ret) {
6250                         dd_dev_err(dd,
6251                                    "%s: unable to acquire LCB access, err %d\n",
6252                                    __func__, ret);
6253                         goto done;
6254                 }
6255                 set_host_lcb_access(dd);
6256         }
6257         dd->lcb_access_count++;
6258 done:
6259         mutex_unlock(&ppd->hls_lock);
6260         return ret;
6261 }
6262
6263 /*
6264  * Release LCB access by decrementing the use count.  If the count is moving
6265  * from 1 to 0, inform the 8051 that it has control back.
6266  *
6267  * Returns:
6268  *      0 on success
6269  *      -errno if unable to release access to the 8051
6270  */
6271 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6272 {
6273         int ret = 0;
6274
6275         /*
6276          * Use the host link state lock because the acquire needed it.
6277          * Here, we only need to keep { selector change, count decrement }
6278          * as a unit.
6279          */
6280         if (sleep_ok) {
6281                 mutex_lock(&dd->pport->hls_lock);
6282         } else {
6283                 while (!mutex_trylock(&dd->pport->hls_lock))
6284                         udelay(1);
6285         }
6286
6287         if (dd->lcb_access_count == 0) {
6288                 dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6289                            __func__);
6290                 goto done;
6291         }
6292
6293         if (dd->lcb_access_count == 1) {
6294                 set_8051_lcb_access(dd);
6295                 ret = request_8051_lcb_access(dd);
6296                 if (ret) {
6297                         dd_dev_err(dd,
6298                                    "%s: unable to release LCB access, err %d\n",
6299                                    __func__, ret);
6300                         /* restore host access if the grant didn't work */
6301                         set_host_lcb_access(dd);
6302                         goto done;
6303                 }
6304         }
6305         dd->lcb_access_count--;
6306 done:
6307         mutex_unlock(&dd->pport->hls_lock);
6308         return ret;
6309 }
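
/*
 * Usage sketch (editorial illustration, not driver code): a caller that
 * needs to touch LCB CSRs directly while the link is up brackets the
 * access with the acquire/release pair above, e.g.
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		... read/write LCB CSRs with read_csr()/write_csr() ...
 *		release_lcb_access(dd, 1);
 *	}
 *
 * With sleep_ok == 0 both routines busy-wait on mutex_trylock() instead
 * of sleeping on the hls_lock.
 */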
6310
6311 /*
6312  * Initialize LCB access variables and state.  Called during driver load,
6313  * after most of the initialization is finished.
6314  *
6315  * The DC default is LCB access on for the host.  The driver defaults to
6316  * leaving access to the 8051.  Assign access now - this constrains the call
6317  * to this routine to be after all LCB set-up is done.  In particular, after
6318  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6319  */
6320 static void init_lcb_access(struct hfi1_devdata *dd)
6321 {
6322         dd->lcb_access_count = 0;
6323 }
6324
6325 /*
6326  * Write a response back to an 8051 request.
6327  */
6328 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6329 {
6330         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6331                   DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6332                   (u64)return_code <<
6333                   DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6334                   (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6335 }
6336
6337 /*
6338  * Handle host requests from the 8051.
6339  */
6340 static void handle_8051_request(struct hfi1_pportdata *ppd)
6341 {
6342         struct hfi1_devdata *dd = ppd->dd;
6343         u64 reg;
6344         u16 data = 0;
6345         u8 type;
6346
6347         reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6348         if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6349                 return; /* no request */
6350
6351         /* zero out COMPLETED so the response is seen */
6352         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6353
6354         /* extract request details */
6355         type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6356                         & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6357         data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6358                         & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6359
6360         switch (type) {
6361         case HREQ_LOAD_CONFIG:
6362         case HREQ_SAVE_CONFIG:
6363         case HREQ_READ_CONFIG:
6364         case HREQ_SET_TX_EQ_ABS:
6365         case HREQ_SET_TX_EQ_REL:
6366         case HREQ_ENABLE:
6367                 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6368                             type);
6369                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6370                 break;
6371         case HREQ_LCB_RESET:
6372                 /* Put the LCB, RX FPE and TX FPE into reset */
6373                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6374                 /* Make sure the write completed */
6375                 (void)read_csr(dd, DCC_CFG_RESET);
6376                 /* Hold the reset long enough to take effect */
6377                 udelay(1);
6378                 /* Take the LCB, RX FPE and TX FPE out of reset */
6379                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6380                 hreq_response(dd, HREQ_SUCCESS, 0);
6381
6382                 break;
6383         case HREQ_CONFIG_DONE:
6384                 hreq_response(dd, HREQ_SUCCESS, 0);
6385                 break;
6386
6387         case HREQ_INTERFACE_TEST:
6388                 hreq_response(dd, HREQ_SUCCESS, data);
6389                 break;
6390         default:
6391                 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6392                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6393                 break;
6394         }
6395 }
6396
6397 /*
6398  * Set up allocation unit value.
6399  */
6400 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6401 {
6402         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6403
6404         /* do not modify other values in the register */
6405         reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6406         reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6407         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6408 }
6409
6410 /*
6411  * Set up the initial VL15 credits for the remote.  Assumes the rest of
6412  * the CM credit registers are zero from a previous global or credit reset.
6413  * Shared limit for VL15 will always be 0.
6414  */
6415 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6416 {
6417         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6418
6419         /* set initial values for total and shared credit limit */
6420         reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6421                  SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6422
6423         /*
6424          * Set total limit to be equal to VL15 credits.
6425          * Leave shared limit at 0.
6426          */
6427         reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6428         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6429
6430         write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6431                   << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6432 }
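
/*
 * Sequencing sketch (editorial illustration, drawn from the link-up flow
 * later in this file): handle_verify_cap() parks the VL15 credits at zero
 * and caches the peer's value, and handle_link_up() applies it once the
 * link is up (quick linkup and the simulator set it via
 * handle_linkup_change() instead):
 *
 *	set_up_vl15(dd, 0);
 *	dd->vl15buf_cached = vl15buf;
 *	...
 *	set_up_vl15(dd, dd->vl15buf_cached);
 */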
6433
6434 /*
6435  * Zero all credit details from the previous connection and
6436  * reset the CM manager's internal counters.
6437  */
6438 void reset_link_credits(struct hfi1_devdata *dd)
6439 {
6440         int i;
6441
6442         /* remove all previous VL credit limits */
6443         for (i = 0; i < TXE_NUM_DATA_VL; i++)
6444                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6445         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6446         write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6447         /* reset the CM block */
6448         pio_send_control(dd, PSC_CM_RESET);
6449         /* reset cached value */
6450         dd->vl15buf_cached = 0;
6451 }
6452
6453 /* convert a vCU to a CU */
6454 static u32 vcu_to_cu(u8 vcu)
6455 {
6456         return 1 << vcu;
6457 }
6458
6459 /* convert a CU to a vCU */
6460 static u8 cu_to_vcu(u32 cu)
6461 {
6462         return ilog2(cu);
6463 }
6464
6465 /* convert a vAU to an AU */
6466 static u32 vau_to_au(u8 vau)
6467 {
6468         return 8 * (1 << vau);
6469 }
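
/*
 * Worked values for the encodings above (editorial illustration): the
 * virtual forms are log2-style encodings of the real credit and
 * allocation units.
 *
 *	vcu_to_cu(0) == 1	cu_to_vcu(1) == 0
 *	vcu_to_cu(3) == 8	cu_to_vcu(8) == 3
 *	vau_to_au(0) == 8	an AU of  8 bytes
 *	vau_to_au(1) == 16	an AU of 16 bytes
 *
 * handle_verify_cap() below bumps a peer vAU of 0 up to 1 because a
 * transmit AU of 8 bytes is not supported.
 */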
6470
6471 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6472 {
6473         ppd->sm_trap_qp = 0x0;
6474         ppd->sa_qp = 0x1;
6475 }
6476
6477 /*
6478  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6479  */
6480 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6481 {
6482         u64 reg;
6483
6484         /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6485         write_csr(dd, DC_LCB_CFG_RUN, 0);
6486         /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6487         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6488                   1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6489         /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6490         dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6491         reg = read_csr(dd, DCC_CFG_RESET);
6492         write_csr(dd, DCC_CFG_RESET, reg |
6493                   DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6494         (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6495         if (!abort) {
6496                 udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6497                 write_csr(dd, DCC_CFG_RESET, reg);
6498                 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6499         }
6500 }
6501
6502 /*
6503  * This routine should be called after the link has been transitioned to
6504  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6505  * reset).
6506  *
6507  * The expectation is that the caller of this routine would have taken
6508  * care of properly transitioning the link into the correct state.
6509  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6510  *       before calling this function.
6511  */
6512 static void _dc_shutdown(struct hfi1_devdata *dd)
6513 {
6514         lockdep_assert_held(&dd->dc8051_lock);
6515
6516         if (dd->dc_shutdown)
6517                 return;
6518
6519         dd->dc_shutdown = 1;
6520         /* Shutdown the LCB */
6521         lcb_shutdown(dd, 1);
6522         /*
6523          * Going to OFFLINE would have caused the 8051 to put the
6524          * SerDes into reset already.  Just need to shut down the 8051
6525          * itself.
6526          */
6527         write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6528 }
6529
6530 static void dc_shutdown(struct hfi1_devdata *dd)
6531 {
6532         mutex_lock(&dd->dc8051_lock);
6533         _dc_shutdown(dd);
6534         mutex_unlock(&dd->dc8051_lock);
6535 }
6536
6537 /*
6538  * Calling this after the DC has been brought out of reset should not
6539  * do any damage.
6540  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6541  *       before calling this function.
6542  */
6543 static void _dc_start(struct hfi1_devdata *dd)
6544 {
6545         lockdep_assert_held(&dd->dc8051_lock);
6546
6547         if (!dd->dc_shutdown)
6548                 return;
6549
6550         /* Take the 8051 out of reset */
6551         write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6552         /* Wait until 8051 is ready */
6553         if (wait_fm_ready(dd, TIMEOUT_8051_START))
6554                 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6555                            __func__);
6556
6557         /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6558         write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6559         /* lcb_shutdown() with abort=1 does not restore these */
6560         write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6561         dd->dc_shutdown = 0;
6562 }
6563
6564 static void dc_start(struct hfi1_devdata *dd)
6565 {
6566         mutex_lock(&dd->dc8051_lock);
6567         _dc_start(dd);
6568         mutex_unlock(&dd->dc8051_lock);
6569 }
6570
6571 /*
6572  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6573  */
6574 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6575 {
6576         u64 rx_radr, tx_radr;
6577         u32 version;
6578
6579         if (dd->icode != ICODE_FPGA_EMULATION)
6580                 return;
6581
6582         /*
6583          * These LCB defaults on emulator _s are good, nothing to do here:
6584          *      LCB_CFG_TX_FIFOS_RADR
6585          *      LCB_CFG_RX_FIFOS_RADR
6586          *      LCB_CFG_LN_DCLK
6587          *      LCB_CFG_IGNORE_LOST_RCLK
6588          */
6589         if (is_emulator_s(dd))
6590                 return;
6591         /* else this is _p */
6592
6593         version = emulator_rev(dd);
6594         if (!is_ax(dd))
6595                 version = 0x2d; /* all B0 use 0x2d or higher settings */
6596
6597         if (version <= 0x12) {
6598                 /* release 0x12 and below */
6599
6600                 /*
6601                  * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6602                  * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6603                  * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6604                  */
6605                 rx_radr =
6606                       0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6607                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6608                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6609                 /*
6610                  * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6611                  * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6612                  */
6613                 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6614         } else if (version <= 0x18) {
6615                 /* release 0x13 up to 0x18 */
6616                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6617                 rx_radr =
6618                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6619                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6620                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6621                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6622         } else if (version == 0x19) {
6623                 /* release 0x19 */
6624                 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6625                 rx_radr =
6626                       0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6627                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6628                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6629                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6630         } else if (version == 0x1a) {
6631                 /* release 0x1a */
6632                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6633                 rx_radr =
6634                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6635                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6636                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6637                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6638                 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6639         } else {
6640                 /* release 0x1b and higher */
6641                 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6642                 rx_radr =
6643                       0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6644                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6645                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6646                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6647         }
6648
6649         write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6650         /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6651         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6652                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6653         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6654 }
6655
6656 /*
6657  * Handle an SMA idle message
6658  *
6659  * This is a work-queue function outside of the interrupt.
6660  */
6661 void handle_sma_message(struct work_struct *work)
6662 {
6663         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6664                                                         sma_message_work);
6665         struct hfi1_devdata *dd = ppd->dd;
6666         u64 msg;
6667         int ret;
6668
6669         /*
6670          * msg is bytes 1-4 of the 40-bit idle message - the command code
6671          * is stripped off
6672          */
6673         ret = read_idle_sma(dd, &msg);
6674         if (ret)
6675                 return;
6676         dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6677         /*
6678          * React to the SMA message.  Byte[1] (0 for us) is the command.
6679          */
6680         switch (msg & 0xff) {
6681         case SMA_IDLE_ARM:
6682                 /*
6683                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6684                  * State Transitions
6685                  *
6686                  * Only expected in INIT or ARMED, discard otherwise.
6687                  */
6688                 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6689                         ppd->neighbor_normal = 1;
6690                 break;
6691         case SMA_IDLE_ACTIVE:
6692                 /*
6693                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6694                  * State Transitions
6695                  *
6696                  * Can activate the node.  Discard otherwise.
6697                  */
6698                 if (ppd->host_link_state == HLS_UP_ARMED &&
6699                     ppd->is_active_optimize_enabled) {
6700                         ppd->neighbor_normal = 1;
6701                         ret = set_link_state(ppd, HLS_UP_ACTIVE);
6702                         if (ret)
6703                                 dd_dev_err(
6704                                         dd,
6705                                         "%s: received Active SMA idle message, couldn't set link to Active\n",
6706                                         __func__);
6707                 }
6708                 break;
6709         default:
6710                 dd_dev_err(dd,
6711                            "%s: received unexpected SMA idle message 0x%llx\n",
6712                            __func__, msg);
6713                 break;
6714         }
6715 }
6716
6717 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6718 {
6719         u64 rcvctrl;
6720         unsigned long flags;
6721
6722         spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6723         rcvctrl = read_csr(dd, RCV_CTRL);
6724         rcvctrl |= add;
6725         rcvctrl &= ~clear;
6726         write_csr(dd, RCV_CTRL, rcvctrl);
6727         spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6728 }
6729
6730 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6731 {
6732         adjust_rcvctrl(dd, add, 0);
6733 }
6734
6735 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6736 {
6737         adjust_rcvctrl(dd, 0, clear);
6738 }
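
/*
 * Usage sketch (editorial illustration): the helpers above keep the
 * read-modify-write of RCV_CTRL under rcvctrl_lock.  The freeze and
 * link-down paths later in this file use them to toggle the port
 * enable bit, e.g.
 *
 *	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);	disable port
 *	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);	re-enable port
 */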
6739
6740 /*
6741  * Called from all interrupt handlers to start handling an SPC freeze.
6742  */
6743 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6744 {
6745         struct hfi1_devdata *dd = ppd->dd;
6746         struct send_context *sc;
6747         int i;
6748         int sc_flags;
6749
6750         if (flags & FREEZE_SELF)
6751                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6752
6753         /* enter frozen mode */
6754         dd->flags |= HFI1_FROZEN;
6755
6756         /* notify all SDMA engines that they are going into a freeze */
6757         sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6758
6759         sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6760                                               SCF_LINK_DOWN : 0);
6761         /* do halt pre-handling on all enabled send contexts */
6762         for (i = 0; i < dd->num_send_contexts; i++) {
6763                 sc = dd->send_contexts[i].sc;
6764                 if (sc && (sc->flags & SCF_ENABLED))
6765                         sc_stop(sc, sc_flags);
6766         }
6767
6768         /* Send contexts are frozen. Notify user space */
6769         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6770
6771         if (flags & FREEZE_ABORT) {
6772                 dd_dev_err(dd,
6773                            "Aborted freeze recovery. Please REBOOT system\n");
6774                 return;
6775         }
6776         /* queue non-interrupt handler */
6777         queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6778 }
6779
6780 /*
6781  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6782  * depending on the "freeze" parameter.
6783  *
6784  * No need to return an error if it times out; our only option
6785  * is to proceed anyway.
6786  */
6787 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6788 {
6789         unsigned long timeout;
6790         u64 reg;
6791
6792         timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6793         while (1) {
6794                 reg = read_csr(dd, CCE_STATUS);
6795                 if (freeze) {
6796                         /* waiting until all indicators are set */
6797                         if ((reg & ALL_FROZE) == ALL_FROZE)
6798                                 return; /* all done */
6799                 } else {
6800                         /* waiting until all indicators are clear */
6801                         if ((reg & ALL_FROZE) == 0)
6802                                 return; /* all done */
6803                 }
6804
6805                 if (time_after(jiffies, timeout)) {
6806                         dd_dev_err(dd,
6807                                    "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6808                                    freeze ? "" : "un", reg & ALL_FROZE,
6809                                    freeze ? ALL_FROZE : 0ull);
6810                         return;
6811                 }
6812                 usleep_range(80, 120);
6813         }
6814 }
6815
6816 /*
6817  * Do all freeze handling for the RXE block.
6818  */
6819 static void rxe_freeze(struct hfi1_devdata *dd)
6820 {
6821         int i;
6822         struct hfi1_ctxtdata *rcd;
6823
6824         /* disable port */
6825         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6826
6827         /* disable all receive contexts */
6828         for (i = 0; i < dd->num_rcv_contexts; i++) {
6829                 rcd = hfi1_rcd_get_by_index(dd, i);
6830                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6831                 hfi1_rcd_put(rcd);
6832         }
6833 }
6834
6835 /*
6836  * Unfreeze handling for the RXE block - kernel contexts only.
6837  * This will also enable the port.  User contexts will do unfreeze
6838  * handling on a per-context basis as they call into the driver.
6839  *
6840  */
6841 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6842 {
6843         u32 rcvmask;
6844         u16 i;
6845         struct hfi1_ctxtdata *rcd;
6846
6847         /* enable all kernel contexts */
6848         for (i = 0; i < dd->num_rcv_contexts; i++) {
6849                 rcd = hfi1_rcd_get_by_index(dd, i);
6850
6851                 /* Ensure all non-user contexts (including vnic) are enabled */
6852                 if (!rcd ||
6853                     (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6854                         hfi1_rcd_put(rcd);
6855                         continue;
6856                 }
6857                 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6858                 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6859                 rcvmask |= rcd->rcvhdrtail_kvaddr ?
6860                         HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6861                 hfi1_rcvctrl(dd, rcvmask, rcd);
6862                 hfi1_rcd_put(rcd);
6863         }
6864
6865         /* enable port */
6866         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6867 }
6868
6869 /*
6870  * Non-interrupt SPC freeze handling.
6871  *
6872  * This is a work-queue function outside of the triggering interrupt.
6873  */
6874 void handle_freeze(struct work_struct *work)
6875 {
6876         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6877                                                                 freeze_work);
6878         struct hfi1_devdata *dd = ppd->dd;
6879
6880         /* wait for freeze indicators on all affected blocks */
6881         wait_for_freeze_status(dd, 1);
6882
6883         /* SPC is now frozen */
6884
6885         /* do send PIO freeze steps */
6886         pio_freeze(dd);
6887
6888         /* do send DMA freeze steps */
6889         sdma_freeze(dd);
6890
6891         /* do send egress freeze steps - nothing to do */
6892
6893         /* do receive freeze steps */
6894         rxe_freeze(dd);
6895
6896         /*
6897          * Unfreeze the hardware - clear the freeze, wait for each
6898          * block's frozen bit to clear, then clear the frozen flag.
6899          */
6900         write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6901         wait_for_freeze_status(dd, 0);
6902
6903         if (is_ax(dd)) {
6904                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6905                 wait_for_freeze_status(dd, 1);
6906                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6907                 wait_for_freeze_status(dd, 0);
6908         }
6909
6910         /* do send PIO unfreeze steps for kernel contexts */
6911         pio_kernel_unfreeze(dd);
6912
6913         /* do send DMA unfreeze steps */
6914         sdma_unfreeze(dd);
6915
6916         /* do send egress unfreeze steps - nothing to do */
6917
6918         /* do receive unfreeze steps for kernel contexts */
6919         rxe_kernel_unfreeze(dd);
6920
6921         /*
6922          * The unfreeze procedure touches global device registers when
6923          * it disables and re-enables RXE. Mark the device unfrozen
6924          * after all that is done so other parts of the driver waiting
6925          * for the device to unfreeze don't do things out of order.
6926          *
6927          * The above implies that the meaning of the HFI1_FROZEN flag is
6928          * "Device has gone into freeze mode and freeze mode handling
6929          * is still in progress."
6930          *
6931          * The flag will be removed when freeze mode processing has
6932          * completed.
6933          */
6934         dd->flags &= ~HFI1_FROZEN;
6935         wake_up(&dd->event_queue);
6936
6937         /* no longer frozen */
6938 }
6939
6940 /**
6941  * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6942  * counters.
6943  * @ppd: info of physical Hfi port
6944  * @link_width: new link width after link up or downgrade
6945  *
6946  * Update the PortXmitWait and PortVlXmitWait counters after
6947  * a link up or downgrade event to reflect a link width change.
6948  */
6949 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6950 {
6951         int i;
6952         u16 tx_width;
6953         u16 link_speed;
6954
6955         tx_width = tx_link_width(link_width);
6956         link_speed = get_link_speed(ppd->link_speed_active);
6957
6958         /*
6959          * There are C_VL_COUNT number of PortVLXmitWait counters.
6960          * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
6961          */
6962         for (i = 0; i < C_VL_COUNT + 1; i++)
6963                 get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6964 }
6965
6966 /*
6967  * Handle a link up interrupt from the 8051.
6968  *
6969  * This is a work-queue function outside of the interrupt.
6970  */
6971 void handle_link_up(struct work_struct *work)
6972 {
6973         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6974                                                   link_up_work);
6975         struct hfi1_devdata *dd = ppd->dd;
6976
6977         set_link_state(ppd, HLS_UP_INIT);
6978
6979         /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6980         read_ltp_rtt(dd);
6981         /*
6982          * OPA specifies that certain counters are cleared on a transition
6983          * to link up, so do that.
6984          */
6985         clear_linkup_counters(dd);
6986         /*
6987          * And (re)set link up default values.
6988          */
6989         set_linkup_defaults(ppd);
6990
6991         /*
6992          * Set VL15 credits. Use cached value from verify cap interrupt.
6993          * In case of quick linkup or simulator, vl15 value will be set by
6994          * handle_linkup_change. VerifyCap interrupt handler will not be
6995          * called in those scenarios.
6996          */
6997         if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6998                 set_up_vl15(dd, dd->vl15buf_cached);
6999
7000         /* enforce link speed enabled */
7001         if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
7002                 /* oops - current speed is not enabled, bounce */
7003                 dd_dev_err(dd,
7004                            "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
7005                            ppd->link_speed_active, ppd->link_speed_enabled);
7006                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
7007                                      OPA_LINKDOWN_REASON_SPEED_POLICY);
7008                 set_link_state(ppd, HLS_DN_OFFLINE);
7009                 start_link(ppd);
7010         }
7011 }
7012
7013 /*
7014  * Several pieces of LNI information were cached for SMA in ppd.
7015  * Reset these on link down
7016  */
7017 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7018 {
7019         ppd->neighbor_guid = 0;
7020         ppd->neighbor_port_number = 0;
7021         ppd->neighbor_type = 0;
7022         ppd->neighbor_fm_security = 0;
7023 }
7024
7025 static const char * const link_down_reason_strs[] = {
7026         [OPA_LINKDOWN_REASON_NONE] = "None",
7027         [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7028         [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7029         [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7030         [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7031         [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7032         [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7033         [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7034         [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7035         [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7036         [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7037         [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7038         [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7039         [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7040         [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7041         [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7042         [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7043         [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7044         [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7045         [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7046         [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7047         [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7048         [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7049         [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7050         [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7051         [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7052         [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7053         [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7054         [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7055         [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7056         [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7057         [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7058         [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7059                                         "Excessive buffer overrun",
7060         [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7061         [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7062         [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7063         [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7064         [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7065         [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7066         [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7067         [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7068                                         "Local media not installed",
7069         [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7070         [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7071         [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7072                                         "End to end not installed",
7073         [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7074         [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7075         [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7076         [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7077         [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7078         [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7079 };
7080
7081 /* return the neighbor link down reason string */
7082 static const char *link_down_reason_str(u8 reason)
7083 {
7084         const char *str = NULL;
7085
7086         if (reason < ARRAY_SIZE(link_down_reason_strs))
7087                 str = link_down_reason_strs[reason];
7088         if (!str)
7089                 str = "(invalid)";
7090
7091         return str;
7092 }
7093
7094 /*
7095  * Handle a link down interrupt from the 8051.
7096  *
7097  * This is a work-queue function outside of the interrupt.
7098  */
7099 void handle_link_down(struct work_struct *work)
7100 {
7101         u8 lcl_reason, neigh_reason = 0;
7102         u8 link_down_reason;
7103         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7104                                                   link_down_work);
7105         int was_up;
7106         static const char ldr_str[] = "Link down reason: ";
7107
7108         if ((ppd->host_link_state &
7109              (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7110              ppd->port_type == PORT_TYPE_FIXED)
7111                 ppd->offline_disabled_reason =
7112                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7113
7114         /* Go offline first, then deal with reading/writing through 8051 */
7115         was_up = !!(ppd->host_link_state & HLS_UP);
7116         set_link_state(ppd, HLS_DN_OFFLINE);
7117         xchg(&ppd->is_link_down_queued, 0);
7118
7119         if (was_up) {
7120                 lcl_reason = 0;
7121                 /* link down reason is only valid if the link was up */
7122                 read_link_down_reason(ppd->dd, &link_down_reason);
7123                 switch (link_down_reason) {
7124                 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7125                         /* the link went down, no idle message reason */
7126                         dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7127                                     ldr_str);
7128                         break;
7129                 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7130                         /*
7131                          * The neighbor reason is only valid if an idle message
7132                          * was received for it.
7133                          */
7134                         read_planned_down_reason_code(ppd->dd, &neigh_reason);
7135                         dd_dev_info(ppd->dd,
7136                                     "%sNeighbor link down message %d, %s\n",
7137                                     ldr_str, neigh_reason,
7138                                     link_down_reason_str(neigh_reason));
7139                         break;
7140                 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7141                         dd_dev_info(ppd->dd,
7142                                     "%sHost requested link to go offline\n",
7143                                     ldr_str);
7144                         break;
7145                 default:
7146                         dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7147                                     ldr_str, link_down_reason);
7148                         break;
7149                 }
7150
7151                 /*
7152                  * If no reason, assume peer-initiated but missed
7153                  * LinkGoingDown idle flits.
7154                  */
7155                 if (neigh_reason == 0)
7156                         lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7157         } else {
7158                 /* went down while polling or going up */
7159                 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7160         }
7161
7162         set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7163
7164         /* inform the SMA when the link transitions from up to down */
7165         if (was_up && ppd->local_link_down_reason.sma == 0 &&
7166             ppd->neigh_link_down_reason.sma == 0) {
7167                 ppd->local_link_down_reason.sma =
7168                                         ppd->local_link_down_reason.latest;
7169                 ppd->neigh_link_down_reason.sma =
7170                                         ppd->neigh_link_down_reason.latest;
7171         }
7172
7173         reset_neighbor_info(ppd);
7174
7175         /* disable the port */
7176         clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7177
7178         /*
7179          * If there is no cable attached, turn the DC off. Otherwise,
7180          * start the link bring up.
7181          */
7182         if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7183                 dc_shutdown(ppd->dd);
7184         else
7185                 start_link(ppd);
7186 }
7187
7188 void handle_link_bounce(struct work_struct *work)
7189 {
7190         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7191                                                         link_bounce_work);
7192
7193         /*
7194          * Only do something if the link is currently up.
7195          */
7196         if (ppd->host_link_state & HLS_UP) {
7197                 set_link_state(ppd, HLS_DN_OFFLINE);
7198                 start_link(ppd);
7199         } else {
7200                 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7201                             __func__, link_state_name(ppd->host_link_state));
7202         }
7203 }
7204
7205 /*
7206  * Mask conversion: Capability exchange to Port LTP.  The capability
7207  * exchange has an implicit 16b CRC that is mandatory.
7208  */
7209 static int cap_to_port_ltp(int cap)
7210 {
7211         int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7212
7213         if (cap & CAP_CRC_14B)
7214                 port_ltp |= PORT_LTP_CRC_MODE_14;
7215         if (cap & CAP_CRC_48B)
7216                 port_ltp |= PORT_LTP_CRC_MODE_48;
7217         if (cap & CAP_CRC_12B_16B_PER_LANE)
7218                 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7219
7220         return port_ltp;
7221 }
7222
7223 /*
7224  * Convert an OPA Port LTP mask to capability mask
7225  */
7226 int port_ltp_to_cap(int port_ltp)
7227 {
7228         int cap_mask = 0;
7229
7230         if (port_ltp & PORT_LTP_CRC_MODE_14)
7231                 cap_mask |= CAP_CRC_14B;
7232         if (port_ltp & PORT_LTP_CRC_MODE_48)
7233                 cap_mask |= CAP_CRC_48B;
7234         if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7235                 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7236
7237         return cap_mask;
7238 }
7239
7240 /*
7241  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7242  */
7243 static int lcb_to_port_ltp(int lcb_crc)
7244 {
7245         int port_ltp = 0;
7246
7247         if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7248                 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7249         else if (lcb_crc == LCB_CRC_48B)
7250                 port_ltp = PORT_LTP_CRC_MODE_48;
7251         else if (lcb_crc == LCB_CRC_14B)
7252                 port_ltp = PORT_LTP_CRC_MODE_14;
7253         else
7254                 port_ltp = PORT_LTP_CRC_MODE_16;
7255
7256         return port_ltp;
7257 }
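
/*
 * Worked example of the three conversions above (editorial illustration):
 *
 *	cap_to_port_ltp(CAP_CRC_14B | CAP_CRC_48B)
 *		== PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14
 *		 | PORT_LTP_CRC_MODE_48
 *	port_ltp_to_cap(PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14)
 *		== CAP_CRC_14B
 *	lcb_to_port_ltp(LCB_CRC_14B) == PORT_LTP_CRC_MODE_14
 *
 * The 16b mode never appears in the capability mask because it is
 * implicit and mandatory in the capability exchange.
 */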
7258
7259 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7260 {
7261         if (ppd->pkeys[2] != 0) {
7262                 ppd->pkeys[2] = 0;
7263                 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7264                 hfi1_event_pkey_change(ppd->dd, ppd->port);
7265         }
7266 }
7267
7268 /*
7269  * Convert the given link width to the OPA link width bitmask.
7270  */
7271 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7272 {
7273         switch (width) {
7274         case 0:
7275                 /*
7276                  * Simulator and quick linkup do not set the width.
7277                  * Just set it to 4x without complaint.
7278                  */
7279                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7280                         return OPA_LINK_WIDTH_4X;
7281                 return 0; /* no lanes up */
7282         case 1: return OPA_LINK_WIDTH_1X;
7283         case 2: return OPA_LINK_WIDTH_2X;
7284         case 3: return OPA_LINK_WIDTH_3X;
7285         default:
7286                 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7287                             __func__, width);
7288                 /* fall through */
7289         case 4: return OPA_LINK_WIDTH_4X;
7290         }
7291 }
7292
7293 /*
7294  * Do a population count on the bottom nibble.
7295  */
7296 static const u8 bit_counts[16] = {
7297         0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7298 };
7299
7300 static inline u8 nibble_to_count(u8 nibble)
7301 {
7302         return bit_counts[nibble & 0xf];
7303 }
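
/*
 * Worked example (editorial illustration): a lane-enable nibble becomes
 * a lane count, which becomes an OPA width bit:
 *
 *	nibble_to_count(0xf) == 4  ->  link_width_to_bits(dd, 4)
 *						== OPA_LINK_WIDTH_4X
 *	nibble_to_count(0x3) == 2  ->  link_width_to_bits(dd, 2)
 *						== OPA_LINK_WIDTH_2X
 *
 * get_link_widths() below applies exactly this chain to the 8051's
 * enable_lane_tx and enable_lane_rx values.
 */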
7304
7305 /*
7306  * Read the active lane information from the 8051 registers and return
7307  * their widths.
7308  *
7309  * Active lane information is found in these 8051 registers:
7310  *      enable_lane_tx
7311  *      enable_lane_rx
7312  */
7313 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7314                             u16 *rx_width)
7315 {
7316         u16 tx, rx;
7317         u8 enable_lane_rx;
7318         u8 enable_lane_tx;
7319         u8 tx_polarity_inversion;
7320         u8 rx_polarity_inversion;
7321         u8 max_rate;
7322
7323         /* read the active lanes */
7324         read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7325                          &rx_polarity_inversion, &max_rate);
7326         read_local_lni(dd, &enable_lane_rx);
7327
7328         /* convert to counts */
7329         tx = nibble_to_count(enable_lane_tx);
7330         rx = nibble_to_count(enable_lane_rx);
7331
7332         /*
7333          * Set link_speed_active here, overriding what was set in
7334          * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7335          * set the max_rate field in handle_verify_cap until v0.19.
7336          */
7337         if ((dd->icode == ICODE_RTL_SILICON) &&
7338             (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7339                 /* max_rate: 0 = 12.5G, 1 = 25G */
7340                 switch (max_rate) {
7341                 case 0:
7342                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7343                         break;
7344                 default:
7345                         dd_dev_err(dd,
7346                                    "%s: unexpected max rate %d, using 25Gb\n",
7347                                    __func__, (int)max_rate);
7348                         /* fall through */
7349                 case 1:
7350                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7351                         break;
7352                 }
7353         }
7354
7355         dd_dev_info(dd,
7356                     "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7357                     enable_lane_tx, tx, enable_lane_rx, rx);
7358         *tx_width = link_width_to_bits(dd, tx);
7359         *rx_width = link_width_to_bits(dd, rx);
7360 }
7361
7362 /*
7363  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7364  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7365  * after link up.  I.e. look elsewhere for downgrade information.
7366  *
7367  * Bits are:
7368  *      + bits [7:4] contain the number of active transmitters
7369  *      + bits [3:0] contain the number of active receivers
7370  * These are numbers 1 through 4 and can be different values if the
7371  * link is asymmetric.
7372  *
7373  * verify_cap_local_fm_link_width[0] retains its original value.
7374  */
7375 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7376                               u16 *rx_width)
7377 {
7378         u16 widths, tx, rx;
7379         u8 misc_bits, local_flags;
7380         u16 active_tx, active_rx;
7381
7382         read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7383         tx = widths >> 12;
7384         rx = (widths >> 8) & 0xf;
7385
7386         *tx_width = link_width_to_bits(dd, tx);
7387         *rx_width = link_width_to_bits(dd, rx);
7388
7389         /* print the active widths */
7390         get_link_widths(dd, &active_tx, &active_rx);
7391 }
7392
7393 /*
7394  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7395  * hardware information when the link first comes up.
7396  *
7397  * The link width is not available until after VerifyCap.AllFramesReceived
7398  * (the trigger for handle_verify_cap), so this is outside that routine
7399  * and should be called when the 8051 signals linkup.
7400  */
7401 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7402 {
7403         u16 tx_width, rx_width;
7404
7405         /* get end-of-LNI link widths */
7406         get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7407
7408         /* use tx_width as the link is supposed to be symmetric on link up */
7409         ppd->link_width_active = tx_width;
7410         /* link width downgrade active (LWD.A) starts out matching LW.A */
7411         ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7412         ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7413         /* per OPA spec, on link up LWD.E resets to LWD.S */
7414         ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7415         /* cache the active egress rate (units: 10^6 bits/sec) */
7416         ppd->current_egress_rate = active_egress_rate(ppd);
7417 }
7418
7419 /*
7420  * Handle a verify capabilities interrupt from the 8051.
7421  *
7422  * This is a work-queue function outside of the interrupt.
7423  */
7424 void handle_verify_cap(struct work_struct *work)
7425 {
7426         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7427                                                                 link_vc_work);
7428         struct hfi1_devdata *dd = ppd->dd;
7429         u64 reg;
7430         u8 power_management;
7431         u8 continuous;
7432         u8 vcu;
7433         u8 vau;
7434         u8 z;
7435         u16 vl15buf;
7436         u16 link_widths;
7437         u16 crc_mask;
7438         u16 crc_val;
7439         u16 device_id;
7440         u16 active_tx, active_rx;
7441         u8 partner_supported_crc;
7442         u8 remote_tx_rate;
7443         u8 device_rev;
7444
7445         set_link_state(ppd, HLS_VERIFY_CAP);
7446
7447         lcb_shutdown(dd, 0);
7448         adjust_lcb_for_fpga_serdes(dd);
7449
7450         read_vc_remote_phy(dd, &power_management, &continuous);
7451         read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7452                               &partner_supported_crc);
7453         read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7454         read_remote_device_id(dd, &device_id, &device_rev);
7455
7456         /* print the active widths */
7457         get_link_widths(dd, &active_tx, &active_rx);
7458         dd_dev_info(dd,
7459                     "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7460                     (int)power_management, (int)continuous);
7461         dd_dev_info(dd,
7462                     "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7463                     (int)vau, (int)z, (int)vcu, (int)vl15buf,
7464                     (int)partner_supported_crc);
7465         dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7466                     (u32)remote_tx_rate, (u32)link_widths);
7467         dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7468                     (u32)device_id, (u32)device_rev);
7469         /*
7470          * The peer vAU value just read is the peer receiver value.  HFI does
7471          * not support a transmit vAU of 0 (AU == 8).  We advertised that
7472          * with Z=1 in the fabric capabilities sent to the peer.  The peer
7473          * will see our Z=1, and, if it advertised a vAU of 0, will move its
7474          * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7475          * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7476          * subject to the Z value exception.
7477          */
7478         if (vau == 0)
7479                 vau = 1;
7480         set_up_vau(dd, vau);
7481
7482         /*
7483          * Set VL15 credits to 0 in the global credit register.  Cache the
7484          * remote VL15 credits value and wait for the link-up interrupt to set it.
7485          */
7486         set_up_vl15(dd, 0);
7487         dd->vl15buf_cached = vl15buf;
7488
7489         /* set up the LCB CRC mode */
7490         crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7491
7492         /* order is important: use the lowest bit in common */
7493         if (crc_mask & CAP_CRC_14B)
7494                 crc_val = LCB_CRC_14B;
7495         else if (crc_mask & CAP_CRC_48B)
7496                 crc_val = LCB_CRC_48B;
7497         else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7498                 crc_val = LCB_CRC_12B_16B_PER_LANE;
7499         else
7500                 crc_val = LCB_CRC_16B;
7501
7502         dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7503         write_csr(dd, DC_LCB_CFG_CRC_MODE,
7504                   (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7505
7506         /* set (14b only) or clear sideband credit */
7507         reg = read_csr(dd, SEND_CM_CTRL);
7508         if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7509                 write_csr(dd, SEND_CM_CTRL,
7510                           reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7511         } else {
7512                 write_csr(dd, SEND_CM_CTRL,
7513                           reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7514         }
7515
7516         ppd->link_speed_active = 0;     /* invalid value */
7517         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7518                 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7519                 switch (remote_tx_rate) {
7520                 case 0:
7521                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7522                         break;
7523                 case 1:
7524                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7525                         break;
7526                 }
7527         } else {
7528                 /* actual rate is highest bit of the ANDed rates */
7529                 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7530
7531                 if (rate & 2)
7532                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7533                 else if (rate & 1)
7534                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7535         }
7536         if (ppd->link_speed_active == 0) {
7537                 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7538                            __func__, (int)remote_tx_rate);
7539                 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7540         }
7541
7542         /*
7543          * Cache the values of the supported, enabled, and active
7544          * LTP CRC modes to return in 'portinfo' queries. But the bit
7545          * flags that are returned in the portinfo query differ from
7546          * what's in the link_crc_mask, crc_sizes, and crc_val
7547          * variables. Convert these here.
7548          */
7549         ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7550                 /* supported crc modes */
7551         ppd->port_ltp_crc_mode |=
7552                 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7553                 /* enabled crc modes */
7554         ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7555                 /* active crc mode */
7556
7557         /* set up the remote credit return table */
7558         assign_remote_cm_au_table(dd, vcu);
7559
7560         /*
7561          * The LCB is reset on entry to handle_verify_cap(), so this must
7562          * be applied on every link up.
7563          *
7564          * Adjust LCB error kill enable to kill the link if
7565          * these RBUF errors are seen:
7566          *      REPLAY_BUF_MBE_SMASK
7567          *      FLIT_INPUT_BUF_MBE_SMASK
7568          */
7569         if (is_ax(dd)) {                        /* fixed in B0 */
7570                 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7571                 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7572                         | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7573                 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7574         }
7575
7576         /* pull LCB fifos out of reset - all fifo clocks must be stable */
7577         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7578
7579         /* give 8051 access to the LCB CSRs */
7580         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7581         set_8051_lcb_access(dd);
7582
7583         /* tell the 8051 to go to LinkUp */
7584         set_link_state(ppd, HLS_GOING_UP);
7585 }
7586
7587 /**
7588  * apply_link_downgrade_policy - Apply the link width downgrade enabled
7589  * policy against the current active link widths.
7590  * @ppd: info of physical Hfi port
7591  * @refresh_widths: True indicates link downgrade event
7592  * @return: True indicates a successful link downgrade. False indicates
7593  *          the link downgrade event failed and the link will bounce back
7594  *          to the default link width.
7595  *
7596  * Called when the enabled policy changes or the active link widths
7597  * change.
7598  * Refresh_widths indicates that a link downgrade occurred. The
7599  * link_downgraded variable is set by refresh_widths and
7600  * determines the success/failure of the policy application.
7601  */
7602 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7603                                  bool refresh_widths)
7604 {
7605         int do_bounce = 0;
7606         int tries;
7607         u16 lwde;
7608         u16 tx, rx;
7609         bool link_downgraded = refresh_widths;
7610
7611         /* use the hls lock to avoid a race with actual link up */
7612         tries = 0;
7613 retry:
7614         mutex_lock(&ppd->hls_lock);
7615         /* only apply if the link is up */
7616         if (ppd->host_link_state & HLS_DOWN) {
7617                 /* still going up; wait and retry */
7618                 if (ppd->host_link_state & HLS_GOING_UP) {
7619                         if (++tries < 1000) {
7620                                 mutex_unlock(&ppd->hls_lock);
7621                                 usleep_range(100, 120); /* arbitrary */
7622                                 goto retry;
7623                         }
7624                         dd_dev_err(ppd->dd,
7625                                    "%s: giving up waiting for link state change\n",
7626                                    __func__);
7627                 }
7628                 goto done;
7629         }
7630
7631         lwde = ppd->link_width_downgrade_enabled;
7632
7633         if (refresh_widths) {
7634                 get_link_widths(ppd->dd, &tx, &rx);
7635                 ppd->link_width_downgrade_tx_active = tx;
7636                 ppd->link_width_downgrade_rx_active = rx;
7637         }
7638
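             /*
              * Three outcomes are possible below:
              *  - a reported tx/rx active width of 0 means the 8051 flagged
              *    a dead link; treat it as a link down, not a downgrade
              *  - downgrade disabled (lwde == 0) but the active width
              *    changed: bounce the link
              *  - active tx or rx width outside the enabled policy: bounce
              *    the link
              */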
7639         if (ppd->link_width_downgrade_tx_active == 0 ||
7640             ppd->link_width_downgrade_rx_active == 0) {
7641                 /* the 8051 reported a dead link as a downgrade */
7642                 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7643                 link_downgraded = false;
7644         } else if (lwde == 0) {
7645                 /* downgrade is disabled */
7646
7647                 /* bounce if not at starting active width */
7648                 if ((ppd->link_width_active !=
7649                      ppd->link_width_downgrade_tx_active) ||
7650                     (ppd->link_width_active !=
7651                      ppd->link_width_downgrade_rx_active)) {
7652                         dd_dev_err(ppd->dd,
7653                                    "Link downgrade is disabled and link has downgraded, downing link\n");
7654                         dd_dev_err(ppd->dd,
7655                                    "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7656                                    ppd->link_width_active,
7657                                    ppd->link_width_downgrade_tx_active,
7658                                    ppd->link_width_downgrade_rx_active);
7659                         do_bounce = 1;
7660                         link_downgraded = false;
7661                 }
7662         } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7663                    (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7664                 /* Tx or Rx is outside the enabled policy */
7665                 dd_dev_err(ppd->dd,
7666                            "Link is outside of downgrade allowed, downing link\n");
7667                 dd_dev_err(ppd->dd,
7668                            "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7669                            lwde, ppd->link_width_downgrade_tx_active,
7670                            ppd->link_width_downgrade_rx_active);
7671                 do_bounce = 1;
7672                 link_downgraded = false;
7673         }
7674
7675 done:
7676         mutex_unlock(&ppd->hls_lock);
7677
7678         if (do_bounce) {
7679                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7680                                      OPA_LINKDOWN_REASON_WIDTH_POLICY);
7681                 set_link_state(ppd, HLS_DN_OFFLINE);
7682                 start_link(ppd);
7683         }
7684
7685         return link_downgraded;
7686 }
7687
7688 /*
7689  * Handle a link downgrade interrupt from the 8051.
7690  *
7691  * This is a work-queue function, run outside of interrupt context.
7692  */
7693 void handle_link_downgrade(struct work_struct *work)
7694 {
7695         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7696                                                         link_downgrade_work);
7697
7698         dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7699         if (apply_link_downgrade_policy(ppd, true))
7700                 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7701 }
7702
7703 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7704 {
7705         return flag_string(buf, buf_len, flags, dcc_err_flags,
7706                 ARRAY_SIZE(dcc_err_flags));
7707 }
7708
7709 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7710 {
7711         return flag_string(buf, buf_len, flags, lcb_err_flags,
7712                 ARRAY_SIZE(lcb_err_flags));
7713 }
7714
7715 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7716 {
7717         return flag_string(buf, buf_len, flags, dc8051_err_flags,
7718                 ARRAY_SIZE(dc8051_err_flags));
7719 }
7720
7721 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7722 {
7723         return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7724                 ARRAY_SIZE(dc8051_info_err_flags));
7725 }
7726
7727 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7728 {
7729         return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7730                 ARRAY_SIZE(dc8051_info_host_msg_flags));
7731 }
7732
7733 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7734 {
7735         struct hfi1_pportdata *ppd = dd->pport;
7736         u64 info, err, host_msg;
7737         int queue_link_down = 0;
7738         char buf[96];
7739
7740         /* look at the flags */
7741         if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7742                 /* 8051 information set by firmware */
7743                 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7744                 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7745                 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7746                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7747                 host_msg = (info >>
7748                         DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7749                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7750
7751                 /*
7752                  * Handle error flags.
7753                  */
7754                 if (err & FAILED_LNI) {
7755                         /*
7756                          * LNI error indications are cleared by the 8051
7757                          * only when starting polling.  Only pay attention
7758                          * to them when in the states that occur during
7759                          * LNI.
7760                          */
7761                         if (ppd->host_link_state
7762                             & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7763                                 queue_link_down = 1;
7764                                 dd_dev_info(dd, "Link error: %s\n",
7765                                             dc8051_info_err_string(buf,
7766                                                                    sizeof(buf),
7767                                                                    err &
7768                                                                    FAILED_LNI));
7769                         }
7770                         err &= ~(u64)FAILED_LNI;
7771                 }
7772                 /* unknown frames can happen during LNI, just count */
7773                 if (err & UNKNOWN_FRAME) {
7774                         ppd->unknown_frame_count++;
7775                         err &= ~(u64)UNKNOWN_FRAME;
7776                 }
7777                 if (err) {
7778                         /* report remaining errors, but do not do anything */
7779                         dd_dev_err(dd, "8051 info error: %s\n",
7780                                    dc8051_info_err_string(buf, sizeof(buf),
7781                                                           err));
7782                 }
7783
7784                 /*
7785                  * Handle host message flags.
7786                  */
7787                 if (host_msg & HOST_REQ_DONE) {
7788                         /*
7789                          * Presently, the driver does a busy wait for
7790                          * host requests to complete.  This is only an
7791                          * informational message.
7792                          * NOTE: The 8051 clears the host message
7793                          * information *on the next 8051 command*.
7794                          * Therefore, when linkup is achieved,
7795                          * this flag will still be set.
7796                          */
7797                         host_msg &= ~(u64)HOST_REQ_DONE;
7798                 }
7799                 if (host_msg & BC_SMA_MSG) {
7800                         queue_work(ppd->link_wq, &ppd->sma_message_work);
7801                         host_msg &= ~(u64)BC_SMA_MSG;
7802                 }
7803                 if (host_msg & LINKUP_ACHIEVED) {
7804                         dd_dev_info(dd, "8051: Link up\n");
7805                         queue_work(ppd->link_wq, &ppd->link_up_work);
7806                         host_msg &= ~(u64)LINKUP_ACHIEVED;
7807                 }
7808                 if (host_msg & EXT_DEVICE_CFG_REQ) {
7809                         handle_8051_request(ppd);
7810                         host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7811                 }
7812                 if (host_msg & VERIFY_CAP_FRAME) {
7813                         queue_work(ppd->link_wq, &ppd->link_vc_work);
7814                         host_msg &= ~(u64)VERIFY_CAP_FRAME;
7815                 }
7816                 if (host_msg & LINK_GOING_DOWN) {
7817                         const char *extra = "";
7818                         /* no downgrade action needed if going down */
7819                         if (host_msg & LINK_WIDTH_DOWNGRADED) {
7820                                 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7821                                 extra = " (ignoring downgrade)";
7822                         }
7823                         dd_dev_info(dd, "8051: Link down%s\n", extra);
7824                         queue_link_down = 1;
7825                         host_msg &= ~(u64)LINK_GOING_DOWN;
7826                 }
7827                 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7828                         queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7829                         host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7830                 }
7831                 if (host_msg) {
7832                         /* report remaining messages, but do not do anything */
7833                         dd_dev_info(dd, "8051 info host message: %s\n",
7834                                     dc8051_info_host_msg_string(buf,
7835                                                                 sizeof(buf),
7836                                                                 host_msg));
7837                 }
7838
7839                 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7840         }
7841         if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7842                 /*
7843                  * Lost the 8051 heartbeat.  If this happens, we
7844                  * receive constant interrupts about it.  Disable
7845                  * the interrupt after the first.
7846                  */
7847                 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7848                 write_csr(dd, DC_DC8051_ERR_EN,
7849                           read_csr(dd, DC_DC8051_ERR_EN) &
7850                           ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7851
7852                 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7853         }
7854         if (reg) {
7855                 /* report the error, but do not do anything */
7856                 dd_dev_err(dd, "8051 error: %s\n",
7857                            dc8051_err_string(buf, sizeof(buf), reg));
7858         }
7859
7860         if (queue_link_down) {
7861                 /*
7862                  * if the link is already going down or disabled, do not
7863                  * queue another. If there's a link down entry already
7864                  * queued, don't queue another one.
7865                  */
7866                 if ((ppd->host_link_state &
7867                     (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7868                     ppd->link_enabled == 0) {
7869                         dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7870                                     __func__, ppd->host_link_state,
7871                                     ppd->link_enabled);
7872                 } else {
7873                         if (xchg(&ppd->is_link_down_queued, 1) == 1)
7874                                 dd_dev_info(dd,
7875                                             "%s: link down request already queued\n",
7876                                             __func__);
7877                         else
7878                                 queue_work(ppd->link_wq, &ppd->link_down_work);
7879                 }
7880         }
7881 }
7882
7883 static const char * const fm_config_txt[] = {
7884 [0] =
7885         "BadHeadDist: Distance violation between two head flits",
7886 [1] =
7887         "BadTailDist: Distance violation between two tail flits",
7888 [2] =
7889         "BadCtrlDist: Distance violation between two credit control flits",
7890 [3] =
7891         "BadCrdAck: Credits return for unsupported VL",
7892 [4] =
7893         "UnsupportedVLMarker: Received VL Marker",
7894 [5] =
7895         "BadPreempt: Exceeded the preemption nesting level",
7896 [6] =
7897         "BadControlFlit: Received unsupported control flit",
7898 /* no 7 */
7899 [8] =
7900         "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7901 };
7902
7903 static const char * const port_rcv_txt[] = {
7904 [1] =
7905         "BadPktLen: Illegal PktLen",
7906 [2] =
7907         "PktLenTooLong: Packet longer than PktLen",
7908 [3] =
7909         "PktLenTooShort: Packet shorter than PktLen",
7910 [4] =
7911         "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7912 [5] =
7913         "BadDLID: Illegal DLID (0, doesn't match HFI)",
7914 [6] =
7915         "BadL2: Illegal L2 opcode",
7916 [7] =
7917         "BadSC: Unsupported SC",
7918 [9] =
7919         "BadRC: Illegal RC",
7920 [11] =
7921         "PreemptError: Preempting with same VL",
7922 [12] =
7923         "PreemptVL15: Preempting a VL15 packet",
7924 };
7925
7926 #define OPA_LDR_FMCONFIG_OFFSET 16
7927 #define OPA_LDR_PORTRCV_OFFSET 0
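     /*
      * These offsets position the fmconfig and portrcv error codes within
      * ppd->port_error_action: handle_dcc_err() below bounces the link when
      * bit (offset + error code) is set in port_error_action.
      */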
7928 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7929 {
7930         u64 info, hdr0, hdr1;
7931         const char *extra;
7932         char buf[96];
7933         struct hfi1_pportdata *ppd = dd->pport;
7934         u8 lcl_reason = 0;
7935         int do_bounce = 0;
7936
7937         if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7938                 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7939                         info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7940                         dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7941                         /* set status bit */
7942                         dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7943                 }
7944                 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7945         }
7946
7947         if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7948                 struct hfi1_pportdata *ppd = dd->pport;
7949                 /* this counter saturates at (2^32) - 1 */
7950                 if (ppd->link_downed < (u32)UINT_MAX)
7951                         ppd->link_downed++;
7952                 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7953         }
7954
7955         if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7956                 u8 reason_valid = 1;
7957
7958                 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7959                 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7960                         dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7961                         /* set status bit */
7962                         dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7963                 }
7964                 switch (info) {
7965                 case 0:
7966                 case 1:
7967                 case 2:
7968                 case 3:
7969                 case 4:
7970                 case 5:
7971                 case 6:
7972                         extra = fm_config_txt[info];
7973                         break;
7974                 case 8:
7975                         extra = fm_config_txt[info];
7976                         if (ppd->port_error_action &
7977                             OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7978                                 do_bounce = 1;
7979                                 /*
7980                                  * lcl_reason cannot be derived from info
7981                                  * for this error
7982                                  */
7983                                 lcl_reason =
7984                                   OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7985                         }
7986                         break;
7987                 default:
7988                         reason_valid = 0;
7989                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7990                         extra = buf;
7991                         break;
7992                 }
7993
7994                 if (reason_valid && !do_bounce) {
7995                         do_bounce = ppd->port_error_action &
7996                                         (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7997                         lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7998                 }
7999
8000                 /* just report this */
8001                 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
8002                                         extra);
8003                 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
8004         }
8005
8006         if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8007                 u8 reason_valid = 1;
8008
8009                 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8010                 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8011                 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8012                 if (!(dd->err_info_rcvport.status_and_code &
8013                       OPA_EI_STATUS_SMASK)) {
8014                         dd->err_info_rcvport.status_and_code =
8015                                 info & OPA_EI_CODE_SMASK;
8016                         /* set status bit */
8017                         dd->err_info_rcvport.status_and_code |=
8018                                 OPA_EI_STATUS_SMASK;
8019                         /*
8020                          * save first 2 flits in the packet that caused
8021                          * the error
8022                          */
8023                         dd->err_info_rcvport.packet_flit1 = hdr0;
8024                         dd->err_info_rcvport.packet_flit2 = hdr1;
8025                 }
8026                 switch (info) {
8027                 case 1:
8028                 case 2:
8029                 case 3:
8030                 case 4:
8031                 case 5:
8032                 case 6:
8033                 case 7:
8034                 case 9:
8035                 case 11:
8036                 case 12:
8037                         extra = port_rcv_txt[info];
8038                         break;
8039                 default:
8040                         reason_valid = 0;
8041                         snprintf(buf, sizeof(buf), "reserved%lld", info);
8042                         extra = buf;
8043                         break;
8044                 }
8045
8046                 if (reason_valid && !do_bounce) {
8047                         do_bounce = ppd->port_error_action &
8048                                         (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8049                         lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8050                 }
8051
8052                 /* just report this */
8053                 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8054                                         "               hdr0 0x%llx, hdr1 0x%llx\n",
8055                                         extra, hdr0, hdr1);
8056
8057                 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8058         }
8059
8060         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8061                 /* informative only */
8062                 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8063                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8064         }
8065         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8066                 /* informative only */
8067                 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8068                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8069         }
8070
8071         if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8072                 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8073
8074         /* report any remaining errors */
8075         if (reg)
8076                 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8077                                         dcc_err_string(buf, sizeof(buf), reg));
8078
8079         if (lcl_reason == 0)
8080                 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8081
8082         if (do_bounce) {
8083                 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8084                                         __func__);
8085                 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8086                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8087         }
8088 }
8089
8090 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8091 {
8092         char buf[96];
8093
8094         dd_dev_info(dd, "LCB Error: %s\n",
8095                     lcb_err_string(buf, sizeof(buf), reg));
8096 }
8097
8098 /*
8099  * CCE block DC interrupt.  Source is < 8.
8100  */
8101 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8102 {
8103         const struct err_reg_info *eri = &dc_errs[source];
8104
8105         if (eri->handler) {
8106                 interrupt_clear_down(dd, 0, eri);
8107         } else if (source == 3 /* dc_lbm_int */) {
8108                 /*
8109                  * This indicates that a parity error has occurred on the
8110                  * address/control lines presented to the LBM.  The error
8111                  * is a single pulse, there is no associated error flag,
8112                  * and it is non-maskable.  This is because if a parity
8113          * error occurs on the request, the request is dropped.
8114                  * This should never occur, but it is nice to know if it
8115                  * ever does.
8116                  */
8117                 dd_dev_err(dd, "Parity error in DC LBM block\n");
8118         } else {
8119                 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8120         }
8121 }
8122
8123 /*
8124  * TX block send credit interrupt.  Source is < 160.
8125  */
8126 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8127 {
8128         sc_group_release_update(dd, source);
8129 }
8130
8131 /*
8132  * TX block SDMA interrupt.  Source is < 48.
8133  *
8134  * SDMA interrupts are grouped by type:
8135  *
8136  *       0 -  N-1 = SDma
8137  *       N - 2N-1 = SDmaProgress
8138  *      2N - 3N-1 = SDmaIdle
8139  */
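     /*
      * A worked decode (illustrative; assumes TXE_NUM_SDMA_ENGINES is 16 as
      * defined elsewhere in this driver): source 21 gives
      * what = 21 / 16 = 1 (SDmaProgress) and which = 21 % 16 = engine 5.
      */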
8140 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8141 {
8142         /* what interrupt */
8143         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
8144         /* which engine */
8145         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8146
8147 #ifdef CONFIG_SDMA_VERBOSITY
8148         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8149                    slashstrip(__FILE__), __LINE__, __func__);
8150         sdma_dumpstate(&dd->per_sdma[which]);
8151 #endif
8152
8153         if (likely(what < 3 && which < dd->num_sdma)) {
8154                 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8155         } else {
8156                 /* should not happen */
8157                 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8158         }
8159 }
8160
8161 /**
8162  * is_rcv_avail_int() - User receive context available IRQ handler
8163  * @dd: valid dd
8164  * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8165  *
8166  * RX block receive available interrupt.  Source is < 160.
8167  *
8168  * This is the general interrupt handler for user (PSM) receive contexts,
8169  * and can only be used for non-threaded IRQs.
8170  */
8171 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8172 {
8173         struct hfi1_ctxtdata *rcd;
8174         char *err_detail;
8175
8176         if (likely(source < dd->num_rcv_contexts)) {
8177                 rcd = hfi1_rcd_get_by_index(dd, source);
8178                 if (rcd) {
8179                         handle_user_interrupt(rcd);
8180                         hfi1_rcd_put(rcd);
8181                         return; /* OK */
8182                 }
8183                 /* received an interrupt, but no rcd */
8184                 err_detail = "dataless";
8185         } else {
8186                 /* received an interrupt, but are not using that context */
8187                 err_detail = "out of range";
8188         }
8189         dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8190                    err_detail, source);
8191 }
8192
8193 /**
8194  * is_rcv_urgent_int() - User receive context urgent IRQ handler
8195  * @dd: valid dd
8196  * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8197  *
8198  * RX block receive urgent interrupt.  Source is < 160.
8199  *
8200  * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8201  */
8202 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8203 {
8204         struct hfi1_ctxtdata *rcd;
8205         char *err_detail;
8206
8207         if (likely(source < dd->num_rcv_contexts)) {
8208                 rcd = hfi1_rcd_get_by_index(dd, source);
8209                 if (rcd) {
8210                         handle_user_interrupt(rcd);
8211                         hfi1_rcd_put(rcd);
8212                         return; /* OK */
8213                 }
8214                 /* received an interrupt, but no rcd */
8215                 err_detail = "dataless";
8216         } else {
8217                 /* received an interrupt, but are not using that context */
8218                 err_detail = "out of range";
8219         }
8220         dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8221                    err_detail, source);
8222 }
8223
8224 /*
8225  * Reserved range interrupt.  Should not be called in normal operation.
8226  */
8227 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8228 {
8229         char name[64];
8230
8231         dd_dev_err(dd, "unexpected %s interrupt\n",
8232                    is_reserved_name(name, sizeof(name), source));
8233 }
8234
8235 static const struct is_table is_table[] = {
8236 /*
8237  * start                 end
8238  *                              name func               interrupt func
8239  */
8240 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8241                                 is_misc_err_name,       is_misc_err_int },
8242 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8243                                 is_sdma_eng_err_name,   is_sdma_eng_err_int },
8244 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8245                                 is_sendctxt_err_name,   is_sendctxt_err_int },
8246 { IS_SDMA_START,             IS_SDMA_END,
8247                                 is_sdma_eng_name,       is_sdma_eng_int },
8248 { IS_VARIOUS_START,          IS_VARIOUS_END,
8249                                 is_various_name,        is_various_int },
8250 { IS_DC_START,       IS_DC_END,
8251                                 is_dc_name,             is_dc_int },
8252 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8253                                 is_rcv_avail_name,      is_rcv_avail_int },
8254 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8255                                 is_rcv_urgent_name,     is_rcv_urgent_int },
8256 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8257                                 is_send_credit_name,    is_send_credit_int},
8258 { IS_RESERVED_START,     IS_RESERVED_END,
8259                                 is_reserved_name,       is_reserved_int},
8260 };
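     /*
      * is_interrupt() below walks this table in order and dispatches on the
      * first entry whose 'end' exceeds the source, so the ranges must stay
      * sorted in ascending order.
      */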
8261
8262 /*
8263  * Interrupt source interrupt - called when the given source has an interrupt.
8264  * Source is a bit index into an array of 64-bit integers.
8265  */
8266 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8267 {
8268         const struct is_table *entry;
8269
8270         /* avoids a double compare by walking the table in-order */
8271         for (entry = &is_table[0]; entry->is_name; entry++) {
8272                 if (source < entry->end) {
8273                         trace_hfi1_interrupt(dd, entry, source);
8274                         entry->is_int(dd, source - entry->start);
8275                         return;
8276                 }
8277         }
8278         /* fell off the end */
8279         dd_dev_err(dd, "invalid interrupt source %u\n", source);
8280 }
8281
8282 /**
8283  * general_interrupt() - General interrupt handler
8284  * @irq: MSIx IRQ vector
8285  * @data: hfi1 devdata
8286  *
8287  * This is able to correctly handle all non-threaded interrupts.  Receive
8288  * context DATA IRQs are threaded and are not supported by this handler.
8289  *
8290  */
8291 static irqreturn_t general_interrupt(int irq, void *data)
8292 {
8293         struct hfi1_devdata *dd = data;
8294         u64 regs[CCE_NUM_INT_CSRS];
8295         u32 bit;
8296         int i;
8297         irqreturn_t handled = IRQ_NONE;
8298
8299         this_cpu_inc(*dd->int_counter);
8300
8301         /* phase 1: scan and clear all handled interrupts */
8302         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8303                 if (dd->gi_mask[i] == 0) {
8304                         regs[i] = 0;    /* used later */
8305                         continue;
8306                 }
8307                 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8308                                 dd->gi_mask[i];
8309                 /* only clear if anything is set */
8310                 if (regs[i])
8311                         write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8312         }
8313
8314         /* phase 2: call the appropriate handler */
8315         for_each_set_bit(bit, (unsigned long *)&regs[0],
8316                          CCE_NUM_INT_CSRS * 64) {
8317                 is_interrupt(dd, bit);
8318                 handled = IRQ_HANDLED;
8319         }
8320
8321         return handled;
8322 }
8323
8324 static irqreturn_t sdma_interrupt(int irq, void *data)
8325 {
8326         struct sdma_engine *sde = data;
8327         struct hfi1_devdata *dd = sde->dd;
8328         u64 status;
8329
8330 #ifdef CONFIG_SDMA_VERBOSITY
8331         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8332                    slashstrip(__FILE__), __LINE__, __func__);
8333         sdma_dumpstate(sde);
8334 #endif
8335
8336         this_cpu_inc(*dd->int_counter);
8337
8338         /* This read_csr is really bad in the hot path */
8339         status = read_csr(dd,
8340                           CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8341                           & sde->imask;
8342         if (likely(status)) {
8343                 /* clear the interrupt(s) */
8344                 write_csr(dd,
8345                           CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8346                           status);
8347
8348                 /* handle the interrupt(s) */
8349                 sdma_engine_interrupt(sde, status);
8350         } else {
8351                 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8352                                         sde->this_idx);
8353         }
8354         return IRQ_HANDLED;
8355 }
8356
8357 /*
8358  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8359  * to ensure that the write completed.  This does NOT guarantee that
8360  * queued DMA writes to memory from the chip are pushed.
8361  */
8362 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8363 {
8364         struct hfi1_devdata *dd = rcd->dd;
8365         u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8366
8367         mmiowb();       /* make sure everything before is written */
8368         write_csr(dd, addr, rcd->imask);
8369         /* force the above write on the chip and get a value back */
8370         (void)read_csr(dd, addr);
8371 }
8372
8373 /* force the receive interrupt */
8374 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8375 {
8376         write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8377 }
8378
8379 /*
8380  * Return non-zero if a packet is present.
8381  *
8382  * This routine is called when rechecking for packets after the RcvAvail
8383  * interrupt has been cleared down.  First, do a quick check of memory for
8384  * a packet present.  If not found, use an expensive CSR read of the context
8385  * tail to determine the actual tail.  The CSR read is necessary because there
8386  * is no method to push pending DMAs to memory other than an interrupt and we
8387  * are trying to determine if we need to force an interrupt.
8388  */
8389 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8390 {
8391         u32 tail;
8392         int present;
8393
8394         if (!rcd->rcvhdrtail_kvaddr)
8395                 present = (rcd->seq_cnt ==
8396                                 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8397         else /* is RDMA rtail */
8398                 present = (rcd->head != get_rcvhdrtail(rcd));
8399
8400         if (present)
8401                 return 1;
8402
8403         /* fall back to a CSR read, correct independent of DMA_RTAIL */
8404         tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8405         return rcd->head != tail;
8406 }
8407
8408 /*
8409  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8410  * This routine will try to handle packets immediately (latency), but if
8411  * it finds too many, it will invoke the thread handler (bandwidth).  The
8412  * chip receive interrupt is *not* cleared down until this or the thread (if
8413  * invoked) is finished.  The intent is to avoid extra interrupts while we
8414  * are processing packets anyway.
8415  */
8416 static irqreturn_t receive_context_interrupt(int irq, void *data)
8417 {
8418         struct hfi1_ctxtdata *rcd = data;
8419         struct hfi1_devdata *dd = rcd->dd;
8420         int disposition;
8421         int present;
8422
8423         trace_hfi1_receive_interrupt(dd, rcd);
8424         this_cpu_inc(*dd->int_counter);
8425         aspm_ctx_disable(rcd);
8426
8427         /* receive interrupt remains blocked while processing packets */
8428         disposition = rcd->do_interrupt(rcd, 0);
8429
8430         /*
8431          * Too many packets were seen while processing packets in this
8432          * IRQ handler.  Invoke the handler thread.  The receive interrupt
8433          * remains blocked.
8434          */
8435         if (disposition == RCV_PKT_LIMIT)
8436                 return IRQ_WAKE_THREAD;
8437
8438         /*
8439          * The packet processor detected no more packets.  Clear the receive
8440          * interrupt and recheck for a packet that may have arrived
8441          * after the previous check and interrupt clear.  If a packet arrived,
8442          * force another interrupt.
8443          */
8444         clear_recv_intr(rcd);
8445         present = check_packet_present(rcd);
8446         if (present)
8447                 force_recv_intr(rcd);
8448
8449         return IRQ_HANDLED;
8450 }
8451
8452 /*
8453  * Receive packet thread handler.  This expects to be invoked with the
8454  * receive interrupt still blocked.
8455  */
8456 static irqreturn_t receive_context_thread(int irq, void *data)
8457 {
8458         struct hfi1_ctxtdata *rcd = data;
8459         int present;
8460
8461         /* receive interrupt is still blocked from the IRQ handler */
8462         (void)rcd->do_interrupt(rcd, 1);
8463
8464         /*
8465          * The packet processor will only return if it detected no more
8466          * packets.  Hold IRQs here so we can safely clear the interrupt and
8467          * recheck for a packet that may have arrived after the previous
8468          * check and the interrupt clear.  If a packet arrived, force another
8469          * interrupt.
8470          */
8471         local_irq_disable();
8472         clear_recv_intr(rcd);
8473         present = check_packet_present(rcd);
8474         if (present)
8475                 force_recv_intr(rcd);
8476         local_irq_enable();
8477
8478         return IRQ_HANDLED;
8479 }
8480
8481 /* ========================================================================= */
8482
8483 u32 read_physical_state(struct hfi1_devdata *dd)
8484 {
8485         u64 reg;
8486
8487         reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8488         return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8489                                 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8490 }
8491
8492 u32 read_logical_state(struct hfi1_devdata *dd)
8493 {
8494         u64 reg;
8495
8496         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8497         return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8498                                 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8499 }
8500
8501 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8502 {
8503         u64 reg;
8504
8505         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8506         /* clear current state, set new state */
8507         reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8508         reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8509         write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8510 }
8511
8512 /*
8513  * Use the 8051 to read a LCB CSR.
8514  */
8515 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8516 {
8517         u32 regno;
8518         int ret;
8519
8520         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8521                 if (acquire_lcb_access(dd, 0) == 0) {
8522                         *data = read_csr(dd, addr);
8523                         release_lcb_access(dd, 0);
8524                         return 0;
8525                 }
8526                 return -EBUSY;
8527         }
8528
8529         /* register is an index of LCB registers: (offset - base) / 8 */
8530         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8531         ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8532         if (ret != HCMD_SUCCESS)
8533                 return -EBUSY;
8534         return 0;
8535 }
8536
8537 /*
8538  * Provide a cache for some of the LCB registers in case the LCB is
8539  * unavailable.
8540  * (The LCB is unavailable in certain link states, for example.)
8541  */
8542 struct lcb_datum {
8543         u32 off;
8544         u64 val;
8545 };
8546
8547 static struct lcb_datum lcb_cache[] = {
8548         { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8549         { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8550         { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8551 };
8552
8553 static void update_lcb_cache(struct hfi1_devdata *dd)
8554 {
8555         int i;
8556         int ret;
8557         u64 val;
8558
8559         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8560                 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8561
8562                 /* Update if we get good data */
8563                 if (likely(ret != -EBUSY))
8564                         lcb_cache[i].val = val;
8565         }
8566 }
8567
8568 static int read_lcb_cache(u32 off, u64 *val)
8569 {
8570         int i;
8571
8572         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8573                 if (lcb_cache[i].off == off) {
8574                         *val = lcb_cache[i].val;
8575                         return 0;
8576                 }
8577         }
8578
8579         pr_warn("%s bad offset 0x%x\n", __func__, off);
8580         return -1;
8581 }
8582
8583 /*
8584  * Read an LCB CSR.  Access may not be in host control, so check.
8585  * Return 0 on success, -EBUSY on failure.
8586  */
8587 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8588 {
8589         struct hfi1_pportdata *ppd = dd->pport;
8590
8591         /* if up, go through the 8051 for the value */
8592         if (ppd->host_link_state & HLS_UP)
8593                 return read_lcb_via_8051(dd, addr, data);
8594         /* if going up or down, check the cache, otherwise, no access */
8595         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8596                 if (read_lcb_cache(addr, data))
8597                         return -EBUSY;
8598                 return 0;
8599         }
8600
8601         /* otherwise, host has access */
8602         *data = read_csr(dd, addr);
8603         return 0;
8604 }
8605
8606 /*
8607  * Use the 8051 to write a LCB CSR.
8608  */
8609 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8610 {
8611         u32 regno;
8612         int ret;
8613
8614         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8615             (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8616                 if (acquire_lcb_access(dd, 0) == 0) {
8617                         write_csr(dd, addr, data);
8618                         release_lcb_access(dd, 0);
8619                         return 0;
8620                 }
8621                 return -EBUSY;
8622         }
8623
8624         /* register is an index of LCB registers: (offset - base) / 8 */
8625         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8626         ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8627         if (ret != HCMD_SUCCESS)
8628                 return -EBUSY;
8629         return 0;
8630 }
8631
8632 /*
8633  * Write an LCB CSR.  Access may not be in host control, so check.
8634  * Return 0 on success, -EBUSY on failure.
8635  */
8636 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8637 {
8638         struct hfi1_pportdata *ppd = dd->pport;
8639
8640         /* if up, go through the 8051 for the value */
8641         if (ppd->host_link_state & HLS_UP)
8642                 return write_lcb_via_8051(dd, addr, data);
8643         /* if going up or down, no access */
8644         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8645                 return -EBUSY;
8646         /* otherwise, host has access */
8647         write_csr(dd, addr, data);
8648         return 0;
8649 }
8650
8651 /*
8652  * Returns:
8653  *      < 0 = Linux error, not able to get access
8654  *      > 0 = 8051 command RETURN_CODE
8655  */
8656 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8657                            u64 *out_data)
8658 {
8659         u64 reg, completed;
8660         int return_code;
8661         unsigned long timeout;
8662
8663         hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8664
8665         mutex_lock(&dd->dc8051_lock);
8666
8667         /* We can't send any commands to the 8051 if it's in reset */
8668         if (dd->dc_shutdown) {
8669                 return_code = -ENODEV;
8670                 goto fail;
8671         }
8672
8673         /*
8674          * If an 8051 host command timed out previously, then the 8051 is
8675          * stuck.
8676          *
8677          * On first timeout, attempt to reset and restart the entire DC
8678          * block (including 8051). (Is this too big of a hammer?)
8679          *
8680          * If the 8051 times out a second time, the reset did not bring it
8681          * back to healthy life. In that case, fail any subsequent commands.
8682          */
8683         if (dd->dc8051_timed_out) {
8684                 if (dd->dc8051_timed_out > 1) {
8685                         dd_dev_err(dd,
8686                                    "Previous 8051 host command timed out, skipping command %u\n",
8687                                    type);
8688                         return_code = -ENXIO;
8689                         goto fail;
8690                 }
8691                 _dc_shutdown(dd);
8692                 _dc_start(dd);
8693         }
8694
8695         /*
8696          * If there is no timeout, then the 8051 command interface is
8697          * waiting for a command.
8698          */
8699
8700         /*
8701          * When writing an LCB CSR, out_data contains the full value
8702          * to be written, while in_data contains the relative LCB
8703          * address in 7:0.  Do the work here, rather than in the caller,
8704          * of distributing the write data to where it needs to go:
8705          *
8706          * Write data
8707          *   39:00 -> in_data[47:8]
8708          *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8709          *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8710          */
8711         if (type == HCMD_WRITE_LCB_CSR) {
8712                 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8713                 /* must preserve COMPLETED - it is tied to hardware */
8714                 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8715                 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8716                 reg |= ((((*out_data) >> 40) & 0xff) <<
8717                                 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8718                       | ((((*out_data) >> 48) & 0xffff) <<
8719                                 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8720                 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8721         }
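             /*
              * Example of the distribution above (illustrative value): for a
              * write value of 0x1122334455667788, bits 39:0 (0x4455667788)
              * ride in in_data[47:8], bits 47:40 (0x33) land in RETURN_CODE,
              * and bits 63:48 (0x1122) land in RSP_DATA.
              */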
8722
8723         /*
8724          * Do two writes: the first to stabilize the type and req_data, the
8725          * second to activate.
8726          */
8727         reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8728                         << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8729                 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8730                         << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8731         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8732         reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8733         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8734
8735         /* wait for completion, alternate: interrupt */
8736         timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8737         while (1) {
8738                 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8739                 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8740                 if (completed)
8741                         break;
8742                 if (time_after(jiffies, timeout)) {
8743                         dd->dc8051_timed_out++;
8744                         dd_dev_err(dd, "8051 host command %u timeout\n", type);
8745                         if (out_data)
8746                                 *out_data = 0;
8747                         return_code = -ETIMEDOUT;
8748                         goto fail;
8749                 }
8750                 udelay(2);
8751         }
8752
8753         if (out_data) {
8754                 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8755                                 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8756                 if (type == HCMD_READ_LCB_CSR) {
8757                         /* top 16 bits are in a different register */
8758                         *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8759                                 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8760                                 << (48
8761                                     - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8762                 }
8763         }
8764         return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8765                                 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8766         dd->dc8051_timed_out = 0;
8767         /*
8768          * Clear command for next user.
8769          */
8770         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8771
8772 fail:
8773         mutex_unlock(&dd->dc8051_lock);
8774         return return_code;
8775 }
8776
8777 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8778 {
8779         return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8780 }
8781
8782 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8783                      u8 lane_id, u32 config_data)
8784 {
8785         u64 data;
8786         int ret;
8787
8788         data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8789                 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8790                 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8791         ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8792         if (ret != HCMD_SUCCESS) {
8793                 dd_dev_err(dd,
8794                            "load 8051 config: field id %d, lane %d, err %d\n",
8795                            (int)field_id, (int)lane_id, ret);
8796         }
8797         return ret;
8798 }
8799
8800 /*
8801  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8802  * set the result, even on error.
8803  * Return 0 on success, -errno on failure
8804  */
8805 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8806                      u32 *result)
8807 {
8808         u64 big_data;
8809         u32 addr;
8810         int ret;
8811
8812         /* address start depends on the lane_id */
8813         if (lane_id < 4)
8814                 addr = (4 * NUM_GENERAL_FIELDS)
8815                         + (lane_id * 4 * NUM_LANE_FIELDS);
8816         else
8817                 addr = 0;
8818         addr += field_id * 4;
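             /*
              * For example (symbolic, using the constants above): lane 1,
              * field 2 resolves to 4 * NUM_GENERAL_FIELDS +
              * 1 * 4 * NUM_LANE_FIELDS + 2 * 4; any lane_id >= 4 addresses
              * the general fields, which start at 0.
              */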
8819
8820         /* read is in 8-byte chunks, hardware will truncate the address down */
8821         ret = read_8051_data(dd, addr, 8, &big_data);
8822
8823         if (ret == 0) {
8824                 /* extract the 4 bytes we want */
8825                 if (addr & 0x4)
8826                         *result = (u32)(big_data >> 32);
8827                 else
8828                         *result = (u32)big_data;
8829         } else {
8830                 *result = 0;
8831                 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8832                            __func__, lane_id, field_id);
8833         }
8834
8835         return ret;
8836 }
8837
8838 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8839                               u8 continuous)
8840 {
8841         u32 frame;
8842
8843         frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8844                 | power_management << POWER_MANAGEMENT_SHIFT;
8845         return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8846                                 GENERAL_CONFIG, frame);
8847 }
8848
8849 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8850                                  u16 vl15buf, u8 crc_sizes)
8851 {
8852         u32 frame;
8853
8854         frame = (u32)vau << VAU_SHIFT
8855                 | (u32)z << Z_SHIFT
8856                 | (u32)vcu << VCU_SHIFT
8857                 | (u32)vl15buf << VL15BUF_SHIFT
8858                 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8859         return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8860                                 GENERAL_CONFIG, frame);
8861 }
8862
8863 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8864                                     u8 *flag_bits, u16 *link_widths)
8865 {
8866         u32 frame;
8867
8868         read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8869                          &frame);
8870         *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8871         *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8872         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8873 }
8874
8875 static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8876                                     u8 misc_bits,
8877                                     u8 flag_bits,
8878                                     u16 link_widths)
8879 {
8880         u32 frame;
8881
8882         frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8883                 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8884                 | (u32)link_widths << LINK_WIDTH_SHIFT;
8885         return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8886                      frame);
8887 }
8888
8889 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8890                                  u8 device_rev)
8891 {
8892         u32 frame;
8893
8894         frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8895                 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8896         return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8897 }
8898
8899 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8900                                   u8 *device_rev)
8901 {
8902         u32 frame;
8903
8904         read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8905         *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8906         *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8907                         & REMOTE_DEVICE_REV_MASK;
8908 }
8909
8910 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8911 {
8912         u32 frame;
8913         u32 mask;
8914
8915         mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8916         read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8917         /* Clear, then set field */
8918         frame &= ~mask;
8919         frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8920         return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8921                                 frame);
8922 }
8923
8924 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8925                       u8 *ver_patch)
8926 {
8927         u32 frame;
8928
8929         read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8930         *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8931                 STS_FM_VERSION_MAJOR_MASK;
8932         *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8933                 STS_FM_VERSION_MINOR_MASK;
8934
8935         read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8936         *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8937                 STS_FM_VERSION_PATCH_MASK;
8938 }
8939
8940 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8941                                u8 *continuous)
8942 {
8943         u32 frame;
8944
8945         read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8946         *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8947                                         & POWER_MANAGEMENT_MASK;
8948         *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8949                                         & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8950 }
8951
8952 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8953                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8954 {
8955         u32 frame;
8956
8957         read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8958         *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8959         *z = (frame >> Z_SHIFT) & Z_MASK;
8960         *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8961         *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8962         *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8963 }
8964
8965 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8966                                       u8 *remote_tx_rate,
8967                                       u16 *link_widths)
8968 {
8969         u32 frame;
8970
8971         read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8972                          &frame);
8973         *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8974                                 & REMOTE_TX_RATE_MASK;
8975         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8976 }
8977
8978 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8979 {
8980         u32 frame;
8981
8982         read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8983         *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8984 }
8985
8986 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8987 {
8988         read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8989 }
8990
8991 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8992 {
8993         read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8994 }
8995
8996 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8997 {
8998         u32 frame;
8999         int ret;
9000
9001         *link_quality = 0;
9002         if (dd->pport->host_link_state & HLS_UP) {
9003                 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9004                                        &frame);
9005                 if (ret == 0)
9006                         *link_quality = (frame >> LINK_QUALITY_SHIFT)
9007                                                 & LINK_QUALITY_MASK;
9008         }
9009 }
9010
9011 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9012 {
9013         u32 frame;
9014
9015         read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9016         *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9017 }
9018
9019 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9020 {
9021         u32 frame;
9022
9023         read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9024         *ldr = (frame & 0xff);
9025 }
9026
9027 static int read_tx_settings(struct hfi1_devdata *dd,
9028                             u8 *enable_lane_tx,
9029                             u8 *tx_polarity_inversion,
9030                             u8 *rx_polarity_inversion,
9031                             u8 *max_rate)
9032 {
9033         u32 frame;
9034         int ret;
9035
9036         ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9037         *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9038                                 & ENABLE_LANE_TX_MASK;
9039         *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9040                                 & TX_POLARITY_INVERSION_MASK;
9041         *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9042                                 & RX_POLARITY_INVERSION_MASK;
9043         *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9044         return ret;
9045 }
9046
9047 static int write_tx_settings(struct hfi1_devdata *dd,
9048                              u8 enable_lane_tx,
9049                              u8 tx_polarity_inversion,
9050                              u8 rx_polarity_inversion,
9051                              u8 max_rate)
9052 {
9053         u32 frame;
9054
9055         /* no need to mask, all variable sizes match field widths */
9056         frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9057                 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9058                 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9059                 | max_rate << MAX_RATE_SHIFT;
9060         return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9061 }
9062
9063 /*
9064  * Read an idle LCB message.
9065  *
9066  * Returns 0 on success, -EINVAL on error
9067  */
9068 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9069 {
9070         int ret;
9071
9072         ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9073         if (ret != HCMD_SUCCESS) {
9074                 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9075                            (u32)type, ret);
9076                 return -EINVAL;
9077         }
9078         dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9079         /* return only the payload as we already know the type */
9080         *data_out >>= IDLE_PAYLOAD_SHIFT;
9081         return 0;
9082 }
9083
9084 /*
9085  * Read an idle SMA message.  To be done in response to a notification from
9086  * the 8051.
9087  *
9088  * Returns 0 on success, -EINVAL on error
9089  */
9090 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9091 {
9092         return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9093                                  data);
9094 }
9095
9096 /*
9097  * Send an idle LCB message.
9098  *
9099  * Returns 0 on success, -EINVAL on error
9100  */
9101 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9102 {
9103         int ret;
9104
9105         dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9106         ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9107         if (ret != HCMD_SUCCESS) {
9108                 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9109                            data, ret);
9110                 return -EINVAL;
9111         }
9112         return 0;
9113 }
9114
9115 /*
9116  * Send an idle SMA message.
9117  *
9118  * Returns 0 on success, -EINVAL on error
9119  */
9120 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9121 {
9122         u64 data;
9123
9124         data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9125                 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9126         return send_idle_message(dd, data);
9127 }
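/*
 * Editor's note: an idle LCB message is a single 64-bit value holding a
 * message type and a payload.  send_idle_sma() above packs the payload above
 * the SMA type; read_idle_message() strips the type off again by shifting the
 * payload back down.  A standalone sketch of that packing follows; the shift,
 * mask, and type values are hypothetical stand-ins for the chip.h constants.
 */
#if 0	/* illustrative only - standalone user-space sketch, not built with the driver */
#include <stdint.h>
#include <stdio.h>

#define EX_PAYLOAD_SHIFT	8	/* hypothetical, cf. IDLE_PAYLOAD_SHIFT */
#define EX_PAYLOAD_MASK		0xffffffffffffull
#define EX_MSG_TYPE_SHIFT	0	/* hypothetical, cf. IDLE_MSG_TYPE_SHIFT */
#define EX_SMA_TYPE		0x3ull	/* hypothetical, cf. IDLE_SMA */

int main(void)
{
	uint64_t payload = 0x1234;
	uint64_t msg = ((payload & EX_PAYLOAD_MASK) << EX_PAYLOAD_SHIFT) |
		       (EX_SMA_TYPE << EX_MSG_TYPE_SHIFT);

	/* the reader only wants the payload back, as in read_idle_message() */
	printf("msg 0x%llx -> payload 0x%llx\n",
	       (unsigned long long)msg,
	       (unsigned long long)(msg >> EX_PAYLOAD_SHIFT));
	return 0;
}
#endif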
9128
9129 /*
9130  * Initialize the LCB then do a quick link up.  This may or may not be
9131  * in loopback.
9132  *
9133  * return 0 on success, -errno on error
9134  */
9135 static int do_quick_linkup(struct hfi1_devdata *dd)
9136 {
9137         int ret;
9138
9139         lcb_shutdown(dd, 0);
9140
9141         if (loopback) {
9142                 /* LCB_CFG_LOOPBACK.VAL = 2 */
9143                 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9144                 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9145                           IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9146                 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9147         }
9148
9149         /* start the LCBs */
9150         /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9151         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9152
9153         /* simulator only loopback steps */
9154         if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9155                 /* LCB_CFG_RUN.EN = 1 */
9156                 write_csr(dd, DC_LCB_CFG_RUN,
9157                           1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9158
9159                 ret = wait_link_transfer_active(dd, 10);
9160                 if (ret)
9161                         return ret;
9162
9163                 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9164                           1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9165         }
9166
9167         if (!loopback) {
9168                 /*
9169                  * When doing quick linkup and not in loopback, both
9170                  * sides must be done with LCB set-up before either
9171                  * starts the quick linkup.  Put a delay here so that
9172                  * both sides can be started and have a chance to be
9173                  * done with LCB set up before resuming.
9174                  */
9175                 dd_dev_err(dd,
9176                            "Pausing for peer to be finished with LCB set up\n");
9177                 msleep(5000);
9178                 dd_dev_err(dd, "Continuing with quick linkup\n");
9179         }
9180
9181         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9182         set_8051_lcb_access(dd);
9183
9184         /*
9185          * State "quick" LinkUp request sets the physical link state to
9186          * LinkUp without a verify capability sequence.
9187          * This state is in simulator v37 and later.
9188          */
9189         ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9190         if (ret != HCMD_SUCCESS) {
9191                 dd_dev_err(dd,
9192                            "%s: set physical link state to quick LinkUp failed with return %d\n",
9193                            __func__, ret);
9194
9195                 set_host_lcb_access(dd);
9196                 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9197
9198                 if (ret >= 0)
9199                         ret = -EINVAL;
9200                 return ret;
9201         }
9202
9203         return 0; /* success */
9204 }
9205
9206 /*
9207  * Do all special steps to set up loopback.
9208  */
9209 static int init_loopback(struct hfi1_devdata *dd)
9210 {
9211         dd_dev_info(dd, "Entering loopback mode\n");
9212
9213         /* all loopbacks should disable self GUID check */
9214         write_csr(dd, DC_DC8051_CFG_MODE,
9215                   (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9216
9217         /*
9218          * The simulator has only one loopback option - LCB.  Switch
9219          * to that option, which includes quick link up.
9220          *
9221          * Accept all valid loopback values.
9222          */
9223         if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9224             (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9225              loopback == LOOPBACK_CABLE)) {
9226                 loopback = LOOPBACK_LCB;
9227                 quick_linkup = 1;
9228                 return 0;
9229         }
9230
9231         /*
9232          * SerDes loopback init sequence is handled in set_local_link_attributes
9233          */
9234         if (loopback == LOOPBACK_SERDES)
9235                 return 0;
9236
9237         /* LCB loopback - handled at poll time */
9238         if (loopback == LOOPBACK_LCB) {
9239                 quick_linkup = 1; /* LCB is always quick linkup */
9240
9241                 /* not supported in emulation due to emulation RTL changes */
9242                 if (dd->icode == ICODE_FPGA_EMULATION) {
9243                         dd_dev_err(dd,
9244                                    "LCB loopback not supported in emulation\n");
9245                         return -EINVAL;
9246                 }
9247                 return 0;
9248         }
9249
9250         /* external cable loopback requires no extra steps */
9251         if (loopback == LOOPBACK_CABLE)
9252                 return 0;
9253
9254         dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9255         return -EINVAL;
9256 }
9257
9258 /*
9259  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9260  * used in the Verify Capability link width attribute.
9261  */
9262 static u16 opa_to_vc_link_widths(u16 opa_widths)
9263 {
9264         int i;
9265         u16 result = 0;
9266
9267         static const struct link_bits {
9268                 u16 from;
9269                 u16 to;
9270         } opa_link_xlate[] = {
9271                 { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9272                 { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9273                 { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9274                 { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9275         };
9276
9277         for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9278                 if (opa_widths & opa_link_xlate[i].from)
9279                         result |= opa_link_xlate[i].to;
9280         }
9281         return result;
9282 }
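/*
 * Editor's note: a worked example of the translation above.  Assuming the
 * OPA_LINK_WIDTH_nX values are single-bit flags (as the table implies), a
 * fabric manager that enabled 1X and 4X produces a Verify Capability width
 * field with bits 0 and 3 set.  The flag values below are hypothetical
 * stand-ins used only for the illustration.
 */
#if 0	/* illustrative only - standalone user-space sketch, not built with the driver */
#include <stdint.h>
#include <stdio.h>

#define EX_OPA_1X 0x0001	/* hypothetical, cf. OPA_LINK_WIDTH_1X */
#define EX_OPA_4X 0x0008	/* hypothetical, cf. OPA_LINK_WIDTH_4X */

int main(void)
{
	uint16_t opa_widths = EX_OPA_1X | EX_OPA_4X;
	uint16_t vc = 0;

	if (opa_widths & EX_OPA_1X)
		vc |= 1 << (1 - 1);
	if (opa_widths & EX_OPA_4X)
		vc |= 1 << (4 - 1);

	printf("VC link width bits: 0x%x\n", vc);	/* -> 0x9 */
	return 0;
}
#endif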
9283
9284 /*
9285  * Set link attributes before moving to polling.
9286  */
9287 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9288 {
9289         struct hfi1_devdata *dd = ppd->dd;
9290         u8 enable_lane_tx;
9291         u8 tx_polarity_inversion;
9292         u8 rx_polarity_inversion;
9293         int ret;
9294         u32 misc_bits = 0;
9295         /* reset our fabric serdes to clear any lingering problems */
9296         fabric_serdes_reset(dd);
9297
9298         /* set the local tx rate - need to read-modify-write */
9299         ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9300                                &rx_polarity_inversion, &ppd->local_tx_rate);
9301         if (ret)
9302                 goto set_local_link_attributes_fail;
9303
9304         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9305                 /* set the tx rate to the fastest enabled */
9306                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9307                         ppd->local_tx_rate = 1;
9308                 else
9309                         ppd->local_tx_rate = 0;
9310         } else {
9311                 /* set the tx rate to all enabled */
9312                 ppd->local_tx_rate = 0;
9313                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9314                         ppd->local_tx_rate |= 2;
9315                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9316                         ppd->local_tx_rate |= 1;
9317         }
9318
9319         enable_lane_tx = 0xF; /* enable all four lanes */
9320         ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9321                                 rx_polarity_inversion, ppd->local_tx_rate);
9322         if (ret != HCMD_SUCCESS)
9323                 goto set_local_link_attributes_fail;
9324
9325         ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9326         if (ret != HCMD_SUCCESS) {
9327                 dd_dev_err(dd,
9328                            "Failed to set host interface version, return 0x%x\n",
9329                            ret);
9330                 goto set_local_link_attributes_fail;
9331         }
9332
9333         /*
9334          * DC supports continuous updates.
9335          */
9336         ret = write_vc_local_phy(dd,
9337                                  0 /* no power management */,
9338                                  1 /* continuous updates */);
9339         if (ret != HCMD_SUCCESS)
9340                 goto set_local_link_attributes_fail;
9341
9342         /* z=1 in the next call: AU of 0 is not supported by the hardware */
9343         ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9344                                     ppd->port_crc_mode_enabled);
9345         if (ret != HCMD_SUCCESS)
9346                 goto set_local_link_attributes_fail;
9347
9348         /*
9349          * SerDes loopback init sequence requires
9350          * setting bit 0 of MISC_CONFIG_BITS
9351          */
9352         if (loopback == LOOPBACK_SERDES)
9353                 misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9354
9355         /*
9356          * An external device configuration request is used to reset the LCB
9357          * to retry obtaining operational lanes when the first attempt is
9358          * unsuccessful.
9359          */
9360         if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9361                 misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9362
9363         ret = write_vc_local_link_mode(dd, misc_bits, 0,
9364                                        opa_to_vc_link_widths(
9365                                                 ppd->link_width_enabled));
9366         if (ret != HCMD_SUCCESS)
9367                 goto set_local_link_attributes_fail;
9368
9369         /* let peer know who we are */
9370         ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9371         if (ret == HCMD_SUCCESS)
9372                 return 0;
9373
9374 set_local_link_attributes_fail:
9375         dd_dev_err(dd,
9376                    "Failed to set local link attributes, return 0x%x\n",
9377                    ret);
9378         return ret;
9379 }
9380
9381 /*
9382  * Call this to start the link.
9383  * Do not do anything if the link is disabled.
9384  * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9385  */
9386 int start_link(struct hfi1_pportdata *ppd)
9387 {
9388         /*
9389          * Tune the SerDes to a ballpark setting for optimal signal and bit
9390          * error rate.  Needs to be done before starting the link.
9391          */
9392         tune_serdes(ppd);
9393
9394         if (!ppd->driver_link_ready) {
9395                 dd_dev_info(ppd->dd,
9396                             "%s: stopping link start because driver is not ready\n",
9397                             __func__);
9398                 return 0;
9399         }
9400
9401         /*
9402          * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9403          * pkey table can be configured properly if the HFI unit is connected
9404          * to a switch port with MgmtAllowed=NO
9405          */
9406         clear_full_mgmt_pkey(ppd);
9407
9408         return set_link_state(ppd, HLS_DN_POLL);
9409 }
9410
9411 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9412 {
9413         struct hfi1_devdata *dd = ppd->dd;
9414         u64 mask;
9415         unsigned long timeout;
9416
9417         /*
9418          * Some QSFP cables have a quirk that asserts the IntN line as a side
9419          * effect of power up on plug-in. We ignore this false positive
9420          * interrupt until the module has finished powering up by waiting for
9421          * a minimum timeout of the module inrush initialization time of
9422          * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9423          * module have stabilized.
9424          */
9425         msleep(500);
9426
9427         /*
9428          * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9429          */
9430         timeout = jiffies + msecs_to_jiffies(2000);
9431         while (1) {
9432                 mask = read_csr(dd, dd->hfi1_id ?
9433                                 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9434                 if (!(mask & QSFP_HFI0_INT_N))
9435                         break;
9436                 if (time_after(jiffies, timeout)) {
9437                         dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9438                                     __func__);
9439                         break;
9440                 }
9441                 udelay(2);
9442         }
9443 }
9444
9445 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9446 {
9447         struct hfi1_devdata *dd = ppd->dd;
9448         u64 mask;
9449
9450         mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9451         if (enable) {
9452                 /*
9453                  * Clear the status register to avoid an immediate interrupt
9454                  * when we re-enable the IntN pin
9455                  */
9456                 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9457                           QSFP_HFI0_INT_N);
9458                 mask |= (u64)QSFP_HFI0_INT_N;
9459         } else {
9460                 mask &= ~(u64)QSFP_HFI0_INT_N;
9461         }
9462         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9463 }
9464
9465 int reset_qsfp(struct hfi1_pportdata *ppd)
9466 {
9467         struct hfi1_devdata *dd = ppd->dd;
9468         u64 mask, qsfp_mask;
9469
9470         /* Disable INT_N from triggering QSFP interrupts */
9471         set_qsfp_int_n(ppd, 0);
9472
9473         /* Reset the QSFP */
9474         mask = (u64)QSFP_HFI0_RESET_N;
9475
9476         qsfp_mask = read_csr(dd,
9477                              dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9478         qsfp_mask &= ~mask;
9479         write_csr(dd,
9480                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9481
9482         udelay(10);
9483
9484         qsfp_mask |= mask;
9485         write_csr(dd,
9486                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9487
9488         wait_for_qsfp_init(ppd);
9489
9490         /*
9491          * Allow INT_N to trigger the QSFP interrupt to watch
9492          * for alarms and warnings
9493          */
9494         set_qsfp_int_n(ppd, 1);
9495
9496         /*
9497          * After the reset, AOC transmitters are enabled by default. They need
9498          * to be turned off to complete the QSFP setup before they can be
9499          * enabled again.
9500          */
9501         return set_qsfp_tx(ppd, 0);
9502 }
9503
9504 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9505                                         u8 *qsfp_interrupt_status)
9506 {
9507         struct hfi1_devdata *dd = ppd->dd;
9508
9509         if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9510             (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9511                 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9512                            __func__);
9513
9514         if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9515             (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9516                 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9517                            __func__);
9518
9519         /*
9520          * The remaining alarms/warnings don't matter if the link is down.
9521          */
9522         if (ppd->host_link_state & HLS_DOWN)
9523                 return 0;
9524
9525         if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9526             (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9527                 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9528                            __func__);
9529
9530         if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9531             (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9532                 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9533                            __func__);
9534
9535         /* Byte 2 is vendor specific */
9536
9537         if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9538             (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9539                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9540                            __func__);
9541
9542         if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9543             (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9544                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9545                            __func__);
9546
9547         if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9548             (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9549                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9550                            __func__);
9551
9552         if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9553             (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9554                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9555                            __func__);
9556
9557         if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9558             (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9559                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9560                            __func__);
9561
9562         if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9563             (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9564                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9565                            __func__);
9566
9567         if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9568             (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9569                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9570                            __func__);
9571
9572         if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9573             (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9574                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9575                            __func__);
9576
9577         if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9578             (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9579                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9580                            __func__);
9581
9582         if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9583             (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9584                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9585                            __func__);
9586
9587         if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9588             (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9589                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9590                            __func__);
9591
9592         if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9593             (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9594                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9595                            __func__);
9596
9597         /* Bytes 9-10 and 11-12 are reserved */
9598         /* Bytes 13-15 are vendor specific */
9599
9600         return 0;
9601 }
9602
9603 /* This routine is only scheduled if the QSFP module present signal is asserted */
9604 void qsfp_event(struct work_struct *work)
9605 {
9606         struct qsfp_data *qd;
9607         struct hfi1_pportdata *ppd;
9608         struct hfi1_devdata *dd;
9609
9610         qd = container_of(work, struct qsfp_data, qsfp_work);
9611         ppd = qd->ppd;
9612         dd = ppd->dd;
9613
9614         /* Sanity check */
9615         if (!qsfp_mod_present(ppd))
9616                 return;
9617
9618         if (ppd->host_link_state == HLS_DN_DISABLE) {
9619                 dd_dev_info(ppd->dd,
9620                             "%s: stopping link start because link is disabled\n",
9621                             __func__);
9622                 return;
9623         }
9624
9625         /*
9626          * Turn DC back on after cable has been re-inserted. Up until
9627          * now, the DC has been in reset to save power.
9628          */
9629         dc_start(dd);
9630
9631         if (qd->cache_refresh_required) {
9632                 set_qsfp_int_n(ppd, 0);
9633
9634                 wait_for_qsfp_init(ppd);
9635
9636                 /*
9637                  * Allow INT_N to trigger the QSFP interrupt to watch
9638                  * for alarms and warnings
9639                  */
9640                 set_qsfp_int_n(ppd, 1);
9641
9642                 start_link(ppd);
9643         }
9644
9645         if (qd->check_interrupt_flags) {
9646                 u8 qsfp_interrupt_status[16] = {0,};
9647
9648                 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9649                                   &qsfp_interrupt_status[0], 16) != 16) {
9650                         dd_dev_info(dd,
9651                                     "%s: Failed to read status of QSFP module\n",
9652                                     __func__);
9653                 } else {
9654                         unsigned long flags;
9655
9656                         handle_qsfp_error_conditions(
9657                                         ppd, qsfp_interrupt_status);
9658                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9659                         ppd->qsfp_info.check_interrupt_flags = 0;
9660                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9661                                                flags);
9662                 }
9663         }
9664 }
9665
9666 static void init_qsfp_int(struct hfi1_devdata *dd)
9667 {
9668         struct hfi1_pportdata *ppd = dd->pport;
9669         u64 qsfp_mask, cce_int_mask;
9670         const int qsfp1_int_smask = QSFP1_INT % 64;
9671         const int qsfp2_int_smask = QSFP2_INT % 64;
9672
9673         /*
9674          * Disable QSFP1 interrupts for HFI1 and QSFP2 interrupts for HFI0.
9675          * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9676          * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9677          * the index of the appropriate CSR in the CCEIntMask CSR array.
9678          */
9679         cce_int_mask = read_csr(dd, CCE_INT_MASK +
9680                                 (8 * (QSFP1_INT / 64)));
9681         if (dd->hfi1_id) {
9682                 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9683                 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9684                           cce_int_mask);
9685         } else {
9686                 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9687                 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9688                           cce_int_mask);
9689         }
9690
9691         qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9692         /* Clear current status to avoid spurious interrupts */
9693         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9694                   qsfp_mask);
9695         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9696                   qsfp_mask);
9697
9698         set_qsfp_int_n(ppd, 0);
9699
9700         /* Handle active low nature of INT_N and MODPRST_N pins */
9701         if (qsfp_mod_present(ppd))
9702                 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9703         write_csr(dd,
9704                   dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9705                   qsfp_mask);
9706 }
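/*
 * Editor's note: the CCEIntMask access above treats the interrupt source
 * number as an index into an array of 64-bit CSRs: the CSR is selected by
 * (source / 64) with a stride of 8 bytes, and the bit within that CSR is
 * (source % 64).  A standalone sketch of that arithmetic, with a hypothetical
 * source number in place of QSFP1_INT/QSFP2_INT:
 */
#if 0	/* illustrative only - standalone user-space sketch, not built with the driver */
#include <stdio.h>

int main(void)
{
	unsigned int src = 201;			/* hypothetical interrupt source */
	unsigned int csr_index = src / 64;	/* which 64-bit mask CSR */
	unsigned int csr_bit = src % 64;	/* bit within that CSR */

	printf("CSR %u (byte offset +%u), bit %u\n",
	       csr_index, 8 * csr_index, csr_bit);	/* -> CSR 3 (+24), bit 9 */
	return 0;
}
#endif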
9707
9708 /*
9709  * Do a one-time initialize of the LCB block.
9710  */
9711 static void init_lcb(struct hfi1_devdata *dd)
9712 {
9713         /* simulator does not correctly handle LCB cclk loopback, skip */
9714         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9715                 return;
9716
9717         /* the DC has been reset earlier in the driver load */
9718
9719         /* set LCB for cclk loopback on the port */
9720         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9721         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9722         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9723         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9724         write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9725         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9726         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9727 }
9728
9729 /*
9730  * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9731  * on error.
9732  */
9733 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9734 {
9735         int ret;
9736         u8 status;
9737
9738         /*
9739          * Report success if this is not a QSFP, or if it is a QSFP but the
9740          * cable is not present.
9741          */
9742         if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9743                 return 0;
9744
9745         /* read byte 2, the status byte */
9746         ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9747         if (ret < 0)
9748                 return ret;
9749         if (ret != 1)
9750                 return -EIO;
9751
9752         return 0; /* success */
9753 }
9754
9755 /*
9756  * Values for QSFP retry.
9757  *
9758  * Give up after 10s (20 x 500ms).  The overall timeout was empirically
9759  * arrived at from experience on a large cluster.
9760  */
9761 #define MAX_QSFP_RETRIES 20
9762 #define QSFP_RETRY_WAIT 500 /* msec */
9763
9764 /*
9765  * Try a QSFP read.  If it fails, schedule a retry for later.
9766  * Called on first link activation after driver load.
9767  */
9768 static void try_start_link(struct hfi1_pportdata *ppd)
9769 {
9770         if (test_qsfp_read(ppd)) {
9771                 /* read failed */
9772                 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9773                         dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9774                         return;
9775                 }
9776                 dd_dev_info(ppd->dd,
9777                             "QSFP not responding, waiting and retrying %d\n",
9778                             (int)ppd->qsfp_retry_count);
9779                 ppd->qsfp_retry_count++;
9780                 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9781                                    msecs_to_jiffies(QSFP_RETRY_WAIT));
9782                 return;
9783         }
9784         ppd->qsfp_retry_count = 0;
9785
9786         start_link(ppd);
9787 }
9788
9789 /*
9790  * Workqueue function to start the link after a delay.
9791  */
9792 void handle_start_link(struct work_struct *work)
9793 {
9794         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9795                                                   start_link_work.work);
9796         try_start_link(ppd);
9797 }
9798
9799 int bringup_serdes(struct hfi1_pportdata *ppd)
9800 {
9801         struct hfi1_devdata *dd = ppd->dd;
9802         u64 guid;
9803         int ret;
9804
9805         if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9806                 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9807
9808         guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9809         if (!guid) {
9810                 if (dd->base_guid)
9811                         guid = dd->base_guid + ppd->port - 1;
9812                 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9813         }
9814
9815         /* Set linkinit_reason on power up per OPA spec */
9816         ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9817
9818         /* one-time init of the LCB */
9819         init_lcb(dd);
9820
9821         if (loopback) {
9822                 ret = init_loopback(dd);
9823                 if (ret < 0)
9824                         return ret;
9825         }
9826
9827         get_port_type(ppd);
9828         if (ppd->port_type == PORT_TYPE_QSFP) {
9829                 set_qsfp_int_n(ppd, 0);
9830                 wait_for_qsfp_init(ppd);
9831                 set_qsfp_int_n(ppd, 1);
9832         }
9833
9834         try_start_link(ppd);
9835         return 0;
9836 }
9837
9838 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9839 {
9840         struct hfi1_devdata *dd = ppd->dd;
9841
9842         /*
9843          * Shut down the link and keep it down.  First clear the flag that
9844          * indicates the driver wants to allow the link to be up
9845          * (driver_link_ready).  Then make sure the link is not automatically
9846          * restarted (link_enabled).  Cancel any pending restart, and
9847          * finally go offline.
9848          */
9849         ppd->driver_link_ready = 0;
9850         ppd->link_enabled = 0;
9851
9852         ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9853         flush_delayed_work(&ppd->start_link_work);
9854         cancel_delayed_work_sync(&ppd->start_link_work);
9855
9856         ppd->offline_disabled_reason =
9857                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9858         set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9859                              OPA_LINKDOWN_REASON_REBOOT);
9860         set_link_state(ppd, HLS_DN_OFFLINE);
9861
9862         /* disable the port */
9863         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9864         cancel_work_sync(&ppd->freeze_work);
9865 }
9866
9867 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9868 {
9869         struct hfi1_pportdata *ppd;
9870         int i;
9871
9872         ppd = (struct hfi1_pportdata *)(dd + 1);
9873         for (i = 0; i < dd->num_pports; i++, ppd++) {
9874                 ppd->ibport_data.rvp.rc_acks = NULL;
9875                 ppd->ibport_data.rvp.rc_qacks = NULL;
9876                 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9877                 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9878                 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9879                 if (!ppd->ibport_data.rvp.rc_acks ||
9880                     !ppd->ibport_data.rvp.rc_delayed_comp ||
9881                     !ppd->ibport_data.rvp.rc_qacks)
9882                         return -ENOMEM;
9883         }
9884
9885         return 0;
9886 }
9887
9888 /*
9889  * index is the index into the receive array
9890  */
9891 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9892                   u32 type, unsigned long pa, u16 order)
9893 {
9894         u64 reg;
9895
9896         if (!(dd->flags & HFI1_PRESENT))
9897                 goto done;
9898
9899         if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9900                 pa = 0;
9901                 order = 0;
9902         } else if (type > PT_INVALID) {
9903                 dd_dev_err(dd,
9904                            "unexpected receive array type %u for index %u, not handled\n",
9905                            type, index);
9906                 goto done;
9907         }
9908         trace_hfi1_put_tid(dd, index, type, pa, order);
9909
9910 #define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
9911         reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9912                 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9913                 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9914                                         << RCV_ARRAY_RT_ADDR_SHIFT;
9915         trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9916         writeq(reg, dd->rcvarray_wc + (index * 8));
9917
9918         if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9919                 /*
9920                  * Eager entries are written and flushed
9921                  *
9922                  * Expected entries are flushed every 4 writes
9923                  */
9924                 flush_wc();
9925 done:
9926         return;
9927 }
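/*
 * Editor's note: each receive array entry written above packs a write-enable
 * bit, the buffer size order, and the physical address shifted down to the
 * 4KB boundary (RT_ADDR_SHIFT).  A standalone sketch of that packing follows;
 * the shift and mask values are hypothetical stand-ins for the
 * RCV_ARRAY_RT_* constants, chosen only to show the structure.
 */
#if 0	/* illustrative only - standalone user-space sketch, not built with the driver */
#include <stdint.h>
#include <stdio.h>

#define EX_WRITE_ENABLE		(1ull << 63)	/* hypothetical */
#define EX_BUF_SIZE_SHIFT	48		/* hypothetical */
#define EX_ADDR_MASK		0xffffffffffull	/* hypothetical */
#define EX_ADDR_SHIFT		0		/* hypothetical */
#define EX_RT_ADDR_SHIFT	12		/* 4KB kernel address boundary */

int main(void)
{
	unsigned long pa = 0x12345000ul;	/* page-aligned DMA address */
	unsigned int order = 2;			/* buffer size order */
	uint64_t reg;

	reg = EX_WRITE_ENABLE
		| (uint64_t)order << EX_BUF_SIZE_SHIFT
		| ((pa >> EX_RT_ADDR_SHIFT) & EX_ADDR_MASK) << EX_ADDR_SHIFT;

	printf("rcvarray entry: 0x%016llx\n", (unsigned long long)reg);
	return 0;
}
#endif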
9928
9929 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9930 {
9931         struct hfi1_devdata *dd = rcd->dd;
9932         u32 i;
9933
9934         /* this could be optimized */
9935         for (i = rcd->eager_base; i < rcd->eager_base +
9936                      rcd->egrbufs.alloced; i++)
9937                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9938
9939         for (i = rcd->expected_base;
9940                         i < rcd->expected_base + rcd->expected_count; i++)
9941                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9942 }
9943
9944 static const char * const ib_cfg_name_strings[] = {
9945         "HFI1_IB_CFG_LIDLMC",
9946         "HFI1_IB_CFG_LWID_DG_ENB",
9947         "HFI1_IB_CFG_LWID_ENB",
9948         "HFI1_IB_CFG_LWID",
9949         "HFI1_IB_CFG_SPD_ENB",
9950         "HFI1_IB_CFG_SPD",
9951         "HFI1_IB_CFG_RXPOL_ENB",
9952         "HFI1_IB_CFG_LREV_ENB",
9953         "HFI1_IB_CFG_LINKLATENCY",
9954         "HFI1_IB_CFG_HRTBT",
9955         "HFI1_IB_CFG_OP_VLS",
9956         "HFI1_IB_CFG_VL_HIGH_CAP",
9957         "HFI1_IB_CFG_VL_LOW_CAP",
9958         "HFI1_IB_CFG_OVERRUN_THRESH",
9959         "HFI1_IB_CFG_PHYERR_THRESH",
9960         "HFI1_IB_CFG_LINKDEFAULT",
9961         "HFI1_IB_CFG_PKEYS",
9962         "HFI1_IB_CFG_MTU",
9963         "HFI1_IB_CFG_LSTATE",
9964         "HFI1_IB_CFG_VL_HIGH_LIMIT",
9965         "HFI1_IB_CFG_PMA_TICKS",
9966         "HFI1_IB_CFG_PORT"
9967 };
9968
9969 static const char *ib_cfg_name(int which)
9970 {
9971         if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9972                 return "invalid";
9973         return ib_cfg_name_strings[which];
9974 }
9975
9976 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9977 {
9978         struct hfi1_devdata *dd = ppd->dd;
9979         int val = 0;
9980
9981         switch (which) {
9982         case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9983                 val = ppd->link_width_enabled;
9984                 break;
9985         case HFI1_IB_CFG_LWID: /* currently active Link-width */
9986                 val = ppd->link_width_active;
9987                 break;
9988         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9989                 val = ppd->link_speed_enabled;
9990                 break;
9991         case HFI1_IB_CFG_SPD: /* current Link speed */
9992                 val = ppd->link_speed_active;
9993                 break;
9994
9995         case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9996         case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9997         case HFI1_IB_CFG_LINKLATENCY:
9998                 goto unimplemented;
9999
10000         case HFI1_IB_CFG_OP_VLS:
10001                 val = ppd->actual_vls_operational;
10002                 break;
10003         case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
10004                 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
10005                 break;
10006         case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
10007                 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
10008                 break;
10009         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10010                 val = ppd->overrun_threshold;
10011                 break;
10012         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10013                 val = ppd->phy_error_threshold;
10014                 break;
10015         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10016                 val = HLS_DEFAULT;
10017                 break;
10018
10019         case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10020         case HFI1_IB_CFG_PMA_TICKS:
10021         default:
10022 unimplemented:
10023                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10024                         dd_dev_info(
10025                                 dd,
10026                                 "%s: which %s: not implemented\n",
10027                                 __func__,
10028                                 ib_cfg_name(which));
10029                 break;
10030         }
10031
10032         return val;
10033 }
10034
10035 /*
10036  * The largest MAD packet size.
10037  */
10038 #define MAX_MAD_PACKET 2048
10039
10040 /*
10041  * Return the maximum header bytes that can go on the _wire_
10042  * for this device. This count includes the ICRC which is
10043  * not part of the packet held in memory but is appended
10044  * by the HW.
10045  * This is dependent on the device's receive header entry size.
10046  * HFI allows this to be set per-receive context, but the
10047  * driver presently enforces a global value.
10048  */
10049 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10050 {
10051         /*
10052          * The maximum non-payload (MTU) bytes in LRH.PktLen are
10053          * the Receive Header Entry Size minus the PBC (or RHF) size
10054          * plus one DW for the ICRC appended by HW.
10055          *
10056          * dd->rcd[0].rcvhdrqentsize is in DW.
10057  * We use rcd[0] as all contexts will have the same value. Also,
10058          * the first kernel context would have been allocated by now so
10059          * we are guaranteed a valid value.
10060          */
10061         return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10062 }
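/*
 * Editor's note: a worked example of the calculation above, assuming a
 * (hypothetical) receive header entry size of 32 DWs:
 *   (32 DW - 2 DW PBC/RHF + 1 DW ICRC) << 2 = 31 * 4 = 124 header bytes
 * allowed on the wire.
 */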
10063
10064 /*
10065  * Set Send Length
10066  * @ppd - per port data
10067  *
10068  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
10069  * registers compare against LRH.PktLen, so use the max bytes included
10070  * in the LRH.
10071  *
10072  * This routine changes all VL values except VL15, which it maintains at
10073  * the same value.
10074  */
10075 static void set_send_length(struct hfi1_pportdata *ppd)
10076 {
10077         struct hfi1_devdata *dd = ppd->dd;
10078         u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10079         u32 maxvlmtu = dd->vld[15].mtu;
10080         u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10081                               & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10082                 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10083         int i, j;
10084         u32 thres;
10085
10086         for (i = 0; i < ppd->vls_supported; i++) {
10087                 if (dd->vld[i].mtu > maxvlmtu)
10088                         maxvlmtu = dd->vld[i].mtu;
10089                 if (i <= 3)
10090                         len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10091                                  & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10092                                 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10093                 else
10094                         len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10095                                  & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10096                                 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10097         }
10098         write_csr(dd, SEND_LEN_CHECK0, len1);
10099         write_csr(dd, SEND_LEN_CHECK1, len2);
10100         /* adjust kernel credit return thresholds based on new MTUs */
10101         /* all kernel receive contexts have the same hdrqentsize */
10102         for (i = 0; i < ppd->vls_supported; i++) {
10103                 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10104                             sc_mtu_to_threshold(dd->vld[i].sc,
10105                                                 dd->vld[i].mtu,
10106                                                 dd->rcd[0]->rcvhdrqentsize));
10107                 for (j = 0; j < INIT_SC_PER_VL; j++)
10108                         sc_set_cr_threshold(
10109                                         pio_select_send_context_vl(dd, j, i),
10110                                             thres);
10111         }
10112         thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10113                     sc_mtu_to_threshold(dd->vld[15].sc,
10114                                         dd->vld[15].mtu,
10115                                         dd->rcd[0]->rcvhdrqentsize));
10116         sc_set_cr_threshold(dd->vld[15].sc, thres);
10117
10118         /* Adjust maximum MTU for the port in DC */
10119         dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10120                 (ilog2(maxvlmtu >> 8) + 1);
10121         len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10122         len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10123         len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10124                 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10125         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10126 }
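/*
 * Editor's note: a worked example of the length packing above, assuming a
 * (hypothetical) VL MTU of 8192 bytes and max_hb of 124 bytes:
 *   (8192 + 124) >> 2 = 2079 DWs
 * is the value placed in that VL's SendLenCheck field.
 */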
10127
10128 static void set_lidlmc(struct hfi1_pportdata *ppd)
10129 {
10130         int i;
10131         u64 sreg = 0;
10132         struct hfi1_devdata *dd = ppd->dd;
10133         u32 mask = ~((1U << ppd->lmc) - 1);
10134         u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10135         u32 lid;
10136
10137         /*
10138          * Program 0 into the CSR if the port LID is extended. This prevents
10139          * 9B packets from being sent out for large LIDs.
10140          */
10141         lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10142         c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10143                 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10144         c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10145                         << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10146               ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10147                         << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10148         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10149
10150         /*
10151          * Iterate over all the send contexts and set their SLID check
10152          */
10153         sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10154                         SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10155                (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10156                         SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10157
10158         for (i = 0; i < chip_send_contexts(dd); i++) {
10159                 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10160                           i, (u32)sreg);
10161                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10162         }
10163
10164         /* Now we have to do the same thing for the sdma engines */
10165         sdma_update_lmc(dd, mask, lid);
10166 }
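/*
 * Editor's note: the LMC masking above clears the low "lmc" bits of the LID
 * so that any of the 2^lmc LIDs assigned to the port passes the SLID check.
 * A standalone sketch with hypothetical values:
 */
#if 0	/* illustrative only - standalone user-space sketch, not built with the driver */
#include <stdio.h>

int main(void)
{
	unsigned int lmc = 2;			/* hypothetical LMC */
	unsigned int lid = 0x1004;		/* hypothetical base LID */
	unsigned int mask = ~((1U << lmc) - 1);	/* -> 0xfffffffc */

	/* any SLID in 0x1004..0x1007 matches (lid & mask) */
	printf("check value 0x%x, mask 0x%x\n", lid & mask, mask);
	return 0;
}
#endif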
10167
10168 static const char *state_completed_string(u32 completed)
10169 {
10170         static const char * const state_completed[] = {
10171                 "EstablishComm",
10172                 "OptimizeEQ",
10173                 "VerifyCap"
10174         };
10175
10176         if (completed < ARRAY_SIZE(state_completed))
10177                 return state_completed[completed];
10178
10179         return "unknown";
10180 }
10181
10182 static const char all_lanes_dead_timeout_expired[] =
10183         "All lanes were inactive - was the interconnect media removed?";
10184 static const char tx_out_of_policy[] =
10185         "Passing lanes on local port do not meet the local link width policy";
10186 static const char no_state_complete[] =
10187         "State timeout occurred before link partner completed the state";
10188 static const char * const state_complete_reasons[] = {
10189         [0x00] = "Reason unknown",
10190         [0x01] = "Link was halted by driver, refer to LinkDownReason",
10191         [0x02] = "Link partner reported failure",
10192         [0x10] = "Unable to achieve frame sync on any lane",
10193         [0x11] =
10194           "Unable to find a common bit rate with the link partner",
10195         [0x12] =
10196           "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10197         [0x13] =
10198           "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10199         [0x14] = no_state_complete,
10200         [0x15] =
10201           "State timeout occurred before link partner identified equalization presets",
10202         [0x16] =
10203           "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10204         [0x17] = tx_out_of_policy,
10205         [0x20] = all_lanes_dead_timeout_expired,
10206         [0x21] =
10207           "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10208         [0x22] = no_state_complete,
10209         [0x23] =
10210           "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10211         [0x24] = tx_out_of_policy,
10212         [0x30] = all_lanes_dead_timeout_expired,
10213         [0x31] =
10214           "State timeout occurred waiting for host to process received frames",
10215         [0x32] = no_state_complete,
10216         [0x33] =
10217           "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10218         [0x34] = tx_out_of_policy,
10219         [0x35] = "Negotiated link width is mutually exclusive",
10220         [0x36] =
10221           "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10222         [0x37] = "Unable to resolve secure data exchange",
10223 };
10224
10225 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10226                                                      u32 code)
10227 {
10228         const char *str = NULL;
10229
10230         if (code < ARRAY_SIZE(state_complete_reasons))
10231                 str = state_complete_reasons[code];
10232
10233         if (str)
10234                 return str;
10235         return "Reserved";
10236 }
10237
10238 /* describe the given last state complete frame */
10239 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10240                                   const char *prefix)
10241 {
10242         struct hfi1_devdata *dd = ppd->dd;
10243         u32 success;
10244         u32 state;
10245         u32 reason;
10246         u32 lanes;
10247
10248         /*
10249          * Decode frame:
10250          *  [ 0: 0] - success
10251          *  [ 3: 1] - state
10252          *  [ 7: 4] - next state timeout
10253          *  [15: 8] - reason code
10254          *  [31:16] - lanes
10255          */
10256         success = frame & 0x1;
10257         state = (frame >> 1) & 0x7;
10258         reason = (frame >> 8) & 0xff;
10259         lanes = (frame >> 16) & 0xffff;
10260
10261         dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10262                    prefix, frame);
10263         dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
10264                    state_completed_string(state), state);
10265         dd_dev_err(dd, "    state successfully completed: %s\n",
10266                    success ? "yes" : "no");
10267         dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10268                    reason, state_complete_reason_code_string(ppd, reason));
10269         dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
10270 }
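/*
 * Editor's note: an example decode of a hypothetical frame value 0x000f1400
 * using the bit layout documented above:
 *   success = 0, state = 0 ("EstablishComm"), reason code = 0x14
 *   ("State timeout occurred before link partner completed the state"),
 *   passing lane mask = 0x000f.
 */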
10271
10272 /*
10273  * Read the last state complete frames and explain them.  This routine
10274  * expects to be called if the link went down during link negotiation
10275  * and initialization (LNI).  That is, anywhere between polling and link up.
10276  */
10277 static void check_lni_states(struct hfi1_pportdata *ppd)
10278 {
10279         u32 last_local_state;
10280         u32 last_remote_state;
10281
10282         read_last_local_state(ppd->dd, &last_local_state);
10283         read_last_remote_state(ppd->dd, &last_remote_state);
10284
10285         /*
10286          * Don't report anything if there is nothing to report.  A value of
10287          * 0 means the link was taken down while polling and there was no
10288          * training in-process.
10289          * training in progress.
10290         if (last_local_state == 0 && last_remote_state == 0)
10291                 return;
10292
10293         decode_state_complete(ppd, last_local_state, "transmitted");
10294         decode_state_complete(ppd, last_remote_state, "received");
10295 }
10296
10297 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10298 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10299 {
10300         u64 reg;
10301         unsigned long timeout;
10302
10303         /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10304         timeout = jiffies + msecs_to_jiffies(wait_ms);
10305         while (1) {
10306                 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10307                 if (reg)
10308                         break;
10309                 if (time_after(jiffies, timeout)) {
10310                         dd_dev_err(dd,
10311                                    "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10312                         return -ETIMEDOUT;
10313                 }
10314                 udelay(2);
10315         }
10316         return 0;
10317 }
10318
10319 /* called when the logical link state is not down as it should be */
10320 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10321 {
10322         struct hfi1_devdata *dd = ppd->dd;
10323
10324         /*
10325          * Bring link up in LCB loopback
10326          */
10327         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10328         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10329                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10330
10331         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10332         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10333         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10334         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10335
10336         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10337         (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10338         udelay(3);
10339         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10340         write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10341
10342         wait_link_transfer_active(dd, 100);
10343
10344         /*
10345          * Bring the link down again.
10346          */
10347         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10348         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10349         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10350
10351         dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10352 }
10353
10354 /*
10355  * Helper for set_link_state().  Do not call except from that routine.
10356  * Expects ppd->hls_mutex to be held.
10357  *
10358  * @rem_reason value to be sent to the neighbor
10359  *
10360  * LinkDownReasons only set if transition succeeds.
10361  */
10362 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10363 {
10364         struct hfi1_devdata *dd = ppd->dd;
10365         u32 previous_state;
10366         int offline_state_ret;
10367         int ret;
10368
10369         update_lcb_cache(dd);
10370
10371         previous_state = ppd->host_link_state;
10372         ppd->host_link_state = HLS_GOING_OFFLINE;
10373
10374         /* start offline transition */
10375         ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10376
10377         if (ret != HCMD_SUCCESS) {
10378                 dd_dev_err(dd,
10379                            "Failed to transition to Offline link state, return %d\n",
10380                            ret);
10381                 return -EINVAL;
10382         }
10383         if (ppd->offline_disabled_reason ==
10384                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10385                 ppd->offline_disabled_reason =
10386                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10387
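        /* wait for the physical link state to settle into an offline substate */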
10388         offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10389         if (offline_state_ret < 0)
10390                 return offline_state_ret;
10391
10392         /* Disable AOC transmitters */
10393         if (ppd->port_type == PORT_TYPE_QSFP &&
10394             ppd->qsfp_info.limiting_active &&
10395             qsfp_mod_present(ppd)) {
10396                 int ret;
10397
10398                 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10399                 if (ret == 0) {
10400                         set_qsfp_tx(ppd, 0);
10401                         release_chip_resource(dd, qsfp_resource(dd));
10402                 } else {
10403                         /* not fatal, but should warn */
10404                         dd_dev_err(dd,
10405                                    "Unable to acquire lock to turn off QSFP TX\n");
10406                 }
10407         }
10408
10409         /*
10410          * Wait for the Offline.Quiet transition if it hasn't happened yet. It
10411          * can take a while for the link to go down.
10412          */
10413         if (offline_state_ret != PLS_OFFLINE_QUIET) {
10414                 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10415                 if (ret < 0)
10416                         return ret;
10417         }
10418
10419         /*
10420          * Now in charge of LCB - must be after the physical state is
10421          * offline.quiet and before host_link_state is changed.
10422          */
10423         set_host_lcb_access(dd);
10424         write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10425
10426         /* make sure the logical state is also down */
10427         ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10428         if (ret)
10429                 force_logical_link_state_down(ppd);
10430
10431         ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10432         update_statusp(ppd, IB_PORT_DOWN);
10433
10434         /*
10435          * The LNI has a mandatory wait time after the physical state
10436          * moves to Offline.Quiet.  The wait time may be different
10437          * depending on how the link went down.  The 8051 firmware
10438          * will observe the needed wait time and only move to ready
10439          * when that is completed.  The largest of the quiet timeouts
10440          * is 6s, so wait that long and then at least 0.5s more for
10441          * other transitions, and another 0.5s for a buffer.
10442          */
10443         ret = wait_fm_ready(dd, 7000);
10444         if (ret) {
10445                 dd_dev_err(dd,
10446                            "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10447                 /* state is really offline, so make it so */
10448                 ppd->host_link_state = HLS_DN_OFFLINE;
10449                 return ret;
10450         }
10451
10452         /*
10453          * The state is now offline and the 8051 is ready to accept host
10454          * requests.
10455          *      - change our state
10456          *      - notify others if we were previously in a linkup state
10457          */
10458         ppd->host_link_state = HLS_DN_OFFLINE;
10459         if (previous_state & HLS_UP) {
10460                 /* went down while link was up */
10461                 handle_linkup_change(dd, 0);
10462         } else if (previous_state
10463                         & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10464                 /* went down while attempting link up */
10465                 check_lni_states(ppd);
10466
10467                 /* The QSFP doesn't need to be reset on LNI failure */
10468                 ppd->qsfp_info.reset_needed = 0;
10469         }
10470
10471         /* the active link width (downgrade) is 0 on link down */
10472         ppd->link_width_active = 0;
10473         ppd->link_width_downgrade_tx_active = 0;
10474         ppd->link_width_downgrade_rx_active = 0;
10475         ppd->current_egress_rate = 0;
10476         return 0;
10477 }
10478
10479 /* return the link state name */
10480 static const char *link_state_name(u32 state)
10481 {
10482         const char *name;
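        /* host link states are single-bit masks; ilog2() gives the bit index */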
10483         int n = ilog2(state);
10484         static const char * const names[] = {
10485                 [__HLS_UP_INIT_BP]       = "INIT",
10486                 [__HLS_UP_ARMED_BP]      = "ARMED",
10487                 [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
10488                 [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
10489                 [__HLS_DN_POLL_BP]       = "POLL",
10490                 [__HLS_DN_DISABLE_BP]    = "DISABLE",
10491                 [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
10492                 [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
10493                 [__HLS_GOING_UP_BP]      = "GOING_UP",
10494                 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10495                 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10496         };
10497
10498         name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10499         return name ? name : "unknown";
10500 }
10501
10502 /* return the link state reason name */
10503 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10504 {
10505         if (state == HLS_UP_INIT) {
10506                 switch (ppd->linkinit_reason) {
10507                 case OPA_LINKINIT_REASON_LINKUP:
10508                         return "(LINKUP)";
10509                 case OPA_LINKINIT_REASON_FLAPPING:
10510                         return "(FLAPPING)";
10511                 case OPA_LINKINIT_OUTSIDE_POLICY:
10512                         return "(OUTSIDE_POLICY)";
10513                 case OPA_LINKINIT_QUARANTINED:
10514                         return "(QUARANTINED)";
10515                 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10516                         return "(INSUFIC_CAPABILITY)";
10517                 default:
10518                         break;
10519                 }
10520         }
10521         return "";
10522 }
10523
10524 /*
10525  * driver_pstate - convert the driver's notion of a port's
10526  * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10527  * Return -1 (converted to a u32) to indicate error.
10528  */
10529 u32 driver_pstate(struct hfi1_pportdata *ppd)
10530 {
10531         switch (ppd->host_link_state) {
10532         case HLS_UP_INIT:
10533         case HLS_UP_ARMED:
10534         case HLS_UP_ACTIVE:
10535                 return IB_PORTPHYSSTATE_LINKUP;
10536         case HLS_DN_POLL:
10537                 return IB_PORTPHYSSTATE_POLLING;
10538         case HLS_DN_DISABLE:
10539                 return IB_PORTPHYSSTATE_DISABLED;
10540         case HLS_DN_OFFLINE:
10541                 return OPA_PORTPHYSSTATE_OFFLINE;
10542         case HLS_VERIFY_CAP:
10543                 return IB_PORTPHYSSTATE_TRAINING;
10544         case HLS_GOING_UP:
10545                 return IB_PORTPHYSSTATE_TRAINING;
10546         case HLS_GOING_OFFLINE:
10547                 return OPA_PORTPHYSSTATE_OFFLINE;
10548         case HLS_LINK_COOLDOWN:
10549                 return OPA_PORTPHYSSTATE_OFFLINE;
10550         case HLS_DN_DOWNDEF:
10551         default:
10552                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10553                            ppd->host_link_state);
10554                 return -1;
10555         }
10556 }
10557
10558 /*
10559  * driver_lstate - convert the driver's notion of a port's
10560  * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10561  * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
10562  */
10563 u32 driver_lstate(struct hfi1_pportdata *ppd)
10564 {
10565         if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10566                 return IB_PORT_DOWN;
10567
10568         switch (ppd->host_link_state & HLS_UP) {
10569         case HLS_UP_INIT:
10570                 return IB_PORT_INIT;
10571         case HLS_UP_ARMED:
10572                 return IB_PORT_ARMED;
10573         case HLS_UP_ACTIVE:
10574                 return IB_PORT_ACTIVE;
10575         default:
10576                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10577                            ppd->host_link_state);
10578                 return -1;
10579         }
10580 }
10581
10582 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10583                           u8 neigh_reason, u8 rem_reason)
10584 {
10585         if (ppd->local_link_down_reason.latest == 0 &&
10586             ppd->neigh_link_down_reason.latest == 0) {
10587                 ppd->local_link_down_reason.latest = lcl_reason;
10588                 ppd->neigh_link_down_reason.latest = neigh_reason;
10589                 ppd->remote_link_down_reason = rem_reason;
10590         }
10591 }
10592
10593 /**
10594  * data_vls_operational() - Verify if data VL BCT credits and MTU
10595  *                          are both set.
10596  * @ppd: pointer to hfi1_pportdata structure
10597  *
10598  * Return: true - OK, false - otherwise.
10599  */
10600 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10601 {
10602         int i;
10603         u64 reg;
10604
10605         if (!ppd->actual_vls_operational)
10606                 return false;
10607
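        /*
         * Each supported VL must have its BCT credits and MTU in agreement:
         * credits with no MTU, or an MTU with no credits, means the VL is
         * not usable for data traffic.
         */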
10608         for (i = 0; i < ppd->vls_supported; i++) {
10609                 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10610                 if ((reg && !ppd->dd->vld[i].mtu) ||
10611                     (!reg && ppd->dd->vld[i].mtu))
10612                         return false;
10613         }
10614
10615         return true;
10616 }
10617
10618 /*
10619  * Change the physical and/or logical link state.
10620  *
10621  * Do not call this routine while inside an interrupt.  It contains
10622  * calls to routines that can take multiple seconds to finish.
10623  *
10624  * Returns 0 on success, -errno on failure.
10625  */
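/*
 * A rough sketch of the normal bring-up order of requested states, as
 * enforced by the "unexpected" checks in the switch below (each step is
 * requested by a separate caller, not looped over here):
 *
 *   HLS_DN_POLL -> HLS_VERIFY_CAP -> HLS_GOING_UP -> HLS_UP_INIT ->
 *   HLS_UP_ARMED -> HLS_UP_ACTIVE
 */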
10626 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10627 {
10628         struct hfi1_devdata *dd = ppd->dd;
10629         struct ib_event event = {.device = NULL};
10630         int ret1, ret = 0;
10631         int orig_new_state, poll_bounce;
10632
10633         mutex_lock(&ppd->hls_lock);
10634
10635         orig_new_state = state;
10636         if (state == HLS_DN_DOWNDEF)
10637                 state = HLS_DEFAULT;
10638
10639         /* interpret poll -> poll as a link bounce */
10640         poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10641                       state == HLS_DN_POLL;
10642
10643         dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10644                     link_state_name(ppd->host_link_state),
10645                     link_state_name(orig_new_state),
10646                     poll_bounce ? "(bounce) " : "",
10647                     link_state_reason_name(ppd, state));
10648
10649         /*
10650          * If we're going to a (HLS_*) link state that implies the logical
10651          * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10652          * reset is_sm_config_started to 0.
10653          */
10654         if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10655                 ppd->is_sm_config_started = 0;
10656
10657         /*
10658          * Do nothing if the states match.  Let a poll to poll link bounce
10659          * go through.
10660          */
10661         if (ppd->host_link_state == state && !poll_bounce)
10662                 goto done;
10663
10664         switch (state) {
10665         case HLS_UP_INIT:
10666                 if (ppd->host_link_state == HLS_DN_POLL &&
10667                     (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10668                         /*
10669                          * Quick link up jumps from polling to here.
10670                          *
10671                          * Whether in normal or loopback mode, the
10672                          * simulator jumps from polling to link up.
10673                          * Accept that here.
10674                          */
10675                         /* OK */
10676                 } else if (ppd->host_link_state != HLS_GOING_UP) {
10677                         goto unexpected;
10678                 }
10679
10680                 /*
10681                  * Wait for Link_Up physical state.
10682                  * Physical and Logical states should already be
10683                  * transitioned to LinkUp and LinkInit respectively.
10684                  */
10685                 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10686                 if (ret) {
10687                         dd_dev_err(dd,
10688                                    "%s: physical state did not change to LINK-UP\n",
10689                                    __func__);
10690                         break;
10691                 }
10692
10693                 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10694                 if (ret) {
10695                         dd_dev_err(dd,
10696                                    "%s: logical state did not change to INIT\n",
10697                                    __func__);
10698                         break;
10699                 }
10700
10701                 /* clear old transient LINKINIT_REASON code */
10702                 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10703                         ppd->linkinit_reason =
10704                                 OPA_LINKINIT_REASON_LINKUP;
10705
10706                 /* enable the port */
10707                 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10708
10709                 handle_linkup_change(dd, 1);
10710                 pio_kernel_linkup(dd);
10711
10712                 /*
10713                  * After link up, a new link width will have been set.
10714                  * Update the xmit counters with regards to the new
10715                  * link width.
10716                  */
10717                 update_xmit_counters(ppd, ppd->link_width_active);
10718
10719                 ppd->host_link_state = HLS_UP_INIT;
10720                 update_statusp(ppd, IB_PORT_INIT);
10721                 break;
10722         case HLS_UP_ARMED:
10723                 if (ppd->host_link_state != HLS_UP_INIT)
10724                         goto unexpected;
10725
10726                 if (!data_vls_operational(ppd)) {
10727                         dd_dev_err(dd,
10728                                    "%s: Invalid data VL credits or mtu\n",
10729                                    __func__);
10730                         ret = -EINVAL;
10731                         break;
10732                 }
10733
10734                 set_logical_state(dd, LSTATE_ARMED);
10735                 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10736                 if (ret) {
10737                         dd_dev_err(dd,
10738                                    "%s: logical state did not change to ARMED\n",
10739                                    __func__);
10740                         break;
10741                 }
10742                 ppd->host_link_state = HLS_UP_ARMED;
10743                 update_statusp(ppd, IB_PORT_ARMED);
10744                 /*
10745                  * The simulator does not currently implement SMA messages,
10746                  * so neighbor_normal is not set.  Set it here when we first
10747                  * move to Armed.
10748                  */
10749                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10750                         ppd->neighbor_normal = 1;
10751                 break;
10752         case HLS_UP_ACTIVE:
10753                 if (ppd->host_link_state != HLS_UP_ARMED)
10754                         goto unexpected;
10755
10756                 set_logical_state(dd, LSTATE_ACTIVE);
10757                 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10758                 if (ret) {
10759                         dd_dev_err(dd,
10760                                    "%s: logical state did not change to ACTIVE\n",
10761                                    __func__);
10762                 } else {
10763                         /* tell all engines to go running */
10764                         sdma_all_running(dd);
10765                         ppd->host_link_state = HLS_UP_ACTIVE;
10766                         update_statusp(ppd, IB_PORT_ACTIVE);
10767
10768                         /* Signal the IB layer that the port has gone active */
10769                         event.device = &dd->verbs_dev.rdi.ibdev;
10770                         event.element.port_num = ppd->port;
10771                         event.event = IB_EVENT_PORT_ACTIVE;
10772                 }
10773                 break;
10774         case HLS_DN_POLL:
10775                 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10776                      ppd->host_link_state == HLS_DN_OFFLINE) &&
10777                     dd->dc_shutdown)
10778                         dc_start(dd);
10779                 /* Hand LED control to the DC */
10780                 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10781
10782                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
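                        /* remember link_enabled so it can be restored if going offline fails */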
10783                         u8 tmp = ppd->link_enabled;
10784
10785                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10786                         if (ret) {
10787                                 ppd->link_enabled = tmp;
10788                                 break;
10789                         }
10790                         ppd->remote_link_down_reason = 0;
10791
10792                         if (ppd->driver_link_ready)
10793                                 ppd->link_enabled = 1;
10794                 }
10795
10796                 set_all_slowpath(ppd->dd);
10797                 ret = set_local_link_attributes(ppd);
10798                 if (ret)
10799                         break;
10800
10801                 ppd->port_error_action = 0;
10802
10803                 if (quick_linkup) {
10804                         /* quick linkup does not go into polling */
10805                         ret = do_quick_linkup(dd);
10806                 } else {
10807                         ret1 = set_physical_link_state(dd, PLS_POLLING);
10808                         if (!ret1)
10809                                 ret1 = wait_phys_link_out_of_offline(ppd,
10810                                                                      3000);
10811                         if (ret1 != HCMD_SUCCESS) {
10812                                 dd_dev_err(dd,
10813                                            "Failed to transition to Polling link state, return 0x%x\n",
10814                                            ret1);
10815                                 ret = -EINVAL;
10816                         }
10817                 }
10818
10819                 /*
10820                  * Change the host link state after requesting DC8051 to
10821                  * change its physical state so that we can ignore any
10822                  * interrupt with stale LNI(XX) error, which will not be
10823                  * cleared until DC8051 transitions to Polling state.
10824                  */
10825                 ppd->host_link_state = HLS_DN_POLL;
10826                 ppd->offline_disabled_reason =
10827                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10828                 /*
10829                  * If an error occurred above, go back to offline.  The
10830                  * caller may reschedule another attempt.
10831                  */
10832                 if (ret)
10833                         goto_offline(ppd, 0);
10834                 else
10835                         log_physical_state(ppd, PLS_POLLING);
10836                 break;
10837         case HLS_DN_DISABLE:
10838                 /* link is disabled */
10839                 ppd->link_enabled = 0;
10840
10841                 /* allow any state to transition to disabled */
10842
10843                 /* must transition to offline first */
10844                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10845                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10846                         if (ret)
10847                                 break;
10848                         ppd->remote_link_down_reason = 0;
10849                 }
10850
10851                 if (!dd->dc_shutdown) {
10852                         ret1 = set_physical_link_state(dd, PLS_DISABLED);
10853                         if (ret1 != HCMD_SUCCESS) {
10854                                 dd_dev_err(dd,
10855                                            "Failed to transition to Disabled link state, return 0x%x\n",
10856                                            ret1);
10857                                 ret = -EINVAL;
10858                                 break;
10859                         }
10860                         ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10861                         if (ret) {
10862                                 dd_dev_err(dd,
10863                                            "%s: physical state did not change to DISABLED\n",
10864                                            __func__);
10865                                 break;
10866                         }
10867                         dc_shutdown(dd);
10868                 }
10869                 ppd->host_link_state = HLS_DN_DISABLE;
10870                 break;
10871         case HLS_DN_OFFLINE:
10872                 if (ppd->host_link_state == HLS_DN_DISABLE)
10873                         dc_start(dd);
10874
10875                 /* allow any state to transition to offline */
10876                 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10877                 if (!ret)
10878                         ppd->remote_link_down_reason = 0;
10879                 break;
10880         case HLS_VERIFY_CAP:
10881                 if (ppd->host_link_state != HLS_DN_POLL)
10882                         goto unexpected;
10883                 ppd->host_link_state = HLS_VERIFY_CAP;
10884                 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10885                 break;
10886         case HLS_GOING_UP:
10887                 if (ppd->host_link_state != HLS_VERIFY_CAP)
10888                         goto unexpected;
10889
10890                 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10891                 if (ret1 != HCMD_SUCCESS) {
10892                         dd_dev_err(dd,
10893                                    "Failed to transition to link up state, return 0x%x\n",
10894                                    ret1);
10895                         ret = -EINVAL;
10896                         break;
10897                 }
10898                 ppd->host_link_state = HLS_GOING_UP;
10899                 break;
10900
10901         case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10902         case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10903         default:
10904                 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10905                             __func__, state);
10906                 ret = -EINVAL;
10907                 break;
10908         }
10909
10910         goto done;
10911
10912 unexpected:
10913         dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10914                    __func__, link_state_name(ppd->host_link_state),
10915                    link_state_name(state));
10916         ret = -EINVAL;
10917
10918 done:
10919         mutex_unlock(&ppd->hls_lock);
10920
10921         if (event.device)
10922                 ib_dispatch_event(&event);
10923
10924         return ret;
10925 }
10926
10927 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10928 {
10929         u64 reg;
10930         int ret = 0;
10931
10932         switch (which) {
10933         case HFI1_IB_CFG_LIDLMC:
10934                 set_lidlmc(ppd);
10935                 break;
10936         case HFI1_IB_CFG_VL_HIGH_LIMIT:
10937                 /*
10938                  * The VL Arbitrator high limit is sent in units of 4k
10939                  * bytes, while HFI stores it in units of 64 bytes.
10940                  */
10941                 val *= 4096 / 64;
10942                 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10943                         << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10944                 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10945                 break;
10946         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10947                 /* HFI only supports POLL as the default link down state */
10948                 if (val != HLS_DN_POLL)
10949                         ret = -EINVAL;
10950                 break;
10951         case HFI1_IB_CFG_OP_VLS:
10952                 if (ppd->vls_operational != val) {
10953                         ppd->vls_operational = val;
10954                         if (!ppd->port)
10955                                 ret = -EINVAL;
10956                 }
10957                 break;
10958         /*
10959          * For link width, link width downgrade, and speed enable, always AND
10960          * the setting with what is actually supported.  This has two benefits.
10961          * First, enabled can't have unsupported values, no matter what the
10962          * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10963          * "fill in with your supported value" have all the bits in the
10964          * field set, so simply ANDing with supported has the desired result.
10965          */
10966         case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10967                 ppd->link_width_enabled = val & ppd->link_width_supported;
10968                 break;
10969         case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10970                 ppd->link_width_downgrade_enabled =
10971                                 val & ppd->link_width_downgrade_supported;
10972                 break;
10973         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10974                 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10975                 break;
10976         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10977                 /*
10978                  * HFI does not follow IB specs, save this value
10979                  * so we can report it, if asked.
10980                  */
10981                 ppd->overrun_threshold = val;
10982                 break;
10983         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10984                 /*
10985                  * HFI does not follow IB specs, save this value
10986                  * so we can report it, if asked.
10987                  */
10988                 ppd->phy_error_threshold = val;
10989                 break;
10990
10991         case HFI1_IB_CFG_MTU:
10992                 set_send_length(ppd);
10993                 break;
10994
10995         case HFI1_IB_CFG_PKEYS:
10996                 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10997                         set_partition_keys(ppd);
10998                 break;
10999
11000         default:
11001                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
11002                         dd_dev_info(ppd->dd,
11003                                     "%s: which %s, val 0x%x: not implemented\n",
11004                                     __func__, ib_cfg_name(which), val);
11005                 break;
11006         }
11007         return ret;
11008 }
11009
11010 /* begin functions related to vl arbitration table caching */
11011 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
11012 {
11013         int i;
11014
11015         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11016                         VL_ARB_LOW_PRIO_TABLE_SIZE);
11017         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11018                         VL_ARB_HIGH_PRIO_TABLE_SIZE);
11019
11020         /*
11021          * Note that we always return values directly from the
11022          * 'vl_arb_cache' (and do no CSR reads) in response to a
11023          * 'Get(VLArbTable)'. This is obviously correct after a
11024          * 'Set(VLArbTable)', since the cache will then be up to
11025          * date. But it's also correct prior to any 'Set(VLArbTable)'
11026          * since then both the cache, and the relevant h/w registers
11027          * will be zeroed.
11028          */
11029
11030         for (i = 0; i < MAX_PRIO_TABLE; i++)
11031                 spin_lock_init(&ppd->vl_arb_cache[i].lock);
11032 }
11033
11034 /*
11035  * vl_arb_lock_cache
11036  *
11037  * All other vl_arb_* functions should be called only after locking
11038  * the cache.
11039  */
11040 static inline struct vl_arb_cache *
11041 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11042 {
11043         if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11044                 return NULL;
11045         spin_lock(&ppd->vl_arb_cache[idx].lock);
11046         return &ppd->vl_arb_cache[idx];
11047 }
11048
11049 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11050 {
11051         spin_unlock(&ppd->vl_arb_cache[idx].lock);
11052 }
11053
11054 static void vl_arb_get_cache(struct vl_arb_cache *cache,
11055                              struct ib_vl_weight_elem *vl)
11056 {
11057         memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11058 }
11059
11060 static void vl_arb_set_cache(struct vl_arb_cache *cache,
11061                              struct ib_vl_weight_elem *vl)
11062 {
11063         memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11064 }
11065
11066 static int vl_arb_match_cache(struct vl_arb_cache *cache,
11067                               struct ib_vl_weight_elem *vl)
11068 {
11069         return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11070 }
11071
11072 /* end functions related to vl arbitration table caching */
11073
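/*
 * Program the VL arbitration weights at 'target' (the low or high priority
 * list CSRs) from the 'size' elements in 'vl'.  If the link is up (and this
 * is not an A0 part), the data VLs are stopped and drained first so a
 * weight change cannot strand a packet in a per-VL FIFO.
 */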
11074 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11075                           u32 size, struct ib_vl_weight_elem *vl)
11076 {
11077         struct hfi1_devdata *dd = ppd->dd;
11078         u64 reg;
11079         unsigned int i, is_up = 0;
11080         int drain, ret = 0;
11081
11082         mutex_lock(&ppd->hls_lock);
11083
11084         if (ppd->host_link_state & HLS_UP)
11085                 is_up = 1;
11086
11087         drain = !is_ax(dd) && is_up;
11088
11089         if (drain)
11090                 /*
11091                  * Before adjusting VL arbitration weights, empty per-VL
11092                  * FIFOs, otherwise a packet whose VL weight is being
11093                  * set to 0 could get stuck in a FIFO with no chance to
11094                  * egress.
11095                  */
11096                 ret = stop_drain_data_vls(dd);
11097
11098         if (ret) {
11099                 dd_dev_err(
11100                         dd,
11101                         "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11102                         __func__);
11103                 goto err;
11104         }
11105
11106         for (i = 0; i < size; i++, vl++) {
11107                 /*
11108                  * NOTE: The low priority shift and mask are used here, but
11109                  * they are the same for both the low and high registers.
11110                  */
11111                 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11112                                 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11113                       | (((u64)vl->weight
11114                                 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11115                                 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11116                 write_csr(dd, target + (i * 8), reg);
11117         }
11118         pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11119
11120         if (drain)
11121                 open_fill_data_vls(dd); /* reopen all VLs */
11122
11123 err:
11124         mutex_unlock(&ppd->hls_lock);
11125
11126         return ret;
11127 }
11128
11129 /*
11130  * Read one credit merge VL register.
11131  */
11132 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11133                            struct vl_limit *vll)
11134 {
11135         u64 reg = read_csr(dd, csr);
11136
11137         vll->dedicated = cpu_to_be16(
11138                 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11139                 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11140         vll->shared = cpu_to_be16(
11141                 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11142                 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11143 }
11144
11145 /*
11146  * Read the current credit merge limits.
11147  */
11148 static int get_buffer_control(struct hfi1_devdata *dd,
11149                               struct buffer_control *bc, u16 *overall_limit)
11150 {
11151         u64 reg;
11152         int i;
11153
11154         /* not all entries are filled in */
11155         memset(bc, 0, sizeof(*bc));
11156
11157         /* OPA and HFI have a 1-1 mapping */
11158         for (i = 0; i < TXE_NUM_DATA_VL; i++)
11159                 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11160
11161         /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11162         read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11163
11164         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11165         bc->overall_shared_limit = cpu_to_be16(
11166                 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11167                 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11168         if (overall_limit)
11169                 *overall_limit = (reg
11170                         >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11171                         & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11172         return sizeof(struct buffer_control);
11173 }
11174
11175 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11176 {
11177         u64 reg;
11178         int i;
11179
11180         /* each register contains 16 SC->VLnt mappings, 4 bits each */
11181         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11182         for (i = 0; i < sizeof(u64); i++) {
11183                 u8 byte = *(((u8 *)&reg) + i);
11184
11185                 dp->vlnt[2 * i] = byte & 0xf;
11186                 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11187         }
11188
11189         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11190         for (i = 0; i < sizeof(u64); i++) {
11191                 u8 byte = *(((u8 *)&reg) + i);
11192
11193                 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11194                 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11195         }
11196         return sizeof(struct sc2vlnt);
11197 }
11198
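/* fill the VL arbitration preemption table with placeholder entries (VL 0xf, weight 0) */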
11199 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11200                               struct ib_vl_weight_elem *vl)
11201 {
11202         unsigned int i;
11203
11204         for (i = 0; i < nelems; i++, vl++) {
11205                 vl->vl = 0xf;
11206                 vl->weight = 0;
11207         }
11208 }
11209
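/* write the SC->VLnt mapping tables: 16 4-bit entries per CSR, mirroring get_sc2vlnt() */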
11210 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11211 {
11212         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11213                   DC_SC_VL_VAL(15_0,
11214                                0, dp->vlnt[0] & 0xf,
11215                                1, dp->vlnt[1] & 0xf,
11216                                2, dp->vlnt[2] & 0xf,
11217                                3, dp->vlnt[3] & 0xf,
11218                                4, dp->vlnt[4] & 0xf,
11219                                5, dp->vlnt[5] & 0xf,
11220                                6, dp->vlnt[6] & 0xf,
11221                                7, dp->vlnt[7] & 0xf,
11222                                8, dp->vlnt[8] & 0xf,
11223                                9, dp->vlnt[9] & 0xf,
11224                                10, dp->vlnt[10] & 0xf,
11225                                11, dp->vlnt[11] & 0xf,
11226                                12, dp->vlnt[12] & 0xf,
11227                                13, dp->vlnt[13] & 0xf,
11228                                14, dp->vlnt[14] & 0xf,
11229                                15, dp->vlnt[15] & 0xf));
11230         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11231                   DC_SC_VL_VAL(31_16,
11232                                16, dp->vlnt[16] & 0xf,
11233                                17, dp->vlnt[17] & 0xf,
11234                                18, dp->vlnt[18] & 0xf,
11235                                19, dp->vlnt[19] & 0xf,
11236                                20, dp->vlnt[20] & 0xf,
11237                                21, dp->vlnt[21] & 0xf,
11238                                22, dp->vlnt[22] & 0xf,
11239                                23, dp->vlnt[23] & 0xf,
11240                                24, dp->vlnt[24] & 0xf,
11241                                25, dp->vlnt[25] & 0xf,
11242                                26, dp->vlnt[26] & 0xf,
11243                                27, dp->vlnt[27] & 0xf,
11244                                28, dp->vlnt[28] & 0xf,
11245                                29, dp->vlnt[29] & 0xf,
11246                                30, dp->vlnt[30] & 0xf,
11247                                31, dp->vlnt[31] & 0xf));
11248 }
11249
11250 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11251                         u16 limit)
11252 {
11253         if (limit != 0)
11254                 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11255                             what, (int)limit, idx);
11256 }
11257
11258 /* change only the shared limit portion of SendCmGlobalCredit */
11259 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11260 {
11261         u64 reg;
11262
11263         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11264         reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11265         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11266         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11267 }
11268
11269 /* change only the total credit limit portion of SendCmGlobalCredit */
11270 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11271 {
11272         u64 reg;
11273
11274         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11275         reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11276         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11277         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11278 }
11279
11280 /* set the given per-VL shared limit */
11281 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11282 {
11283         u64 reg;
11284         u32 addr;
11285
11286         if (vl < TXE_NUM_DATA_VL)
11287                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11288         else
11289                 addr = SEND_CM_CREDIT_VL15;
11290
11291         reg = read_csr(dd, addr);
11292         reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11293         reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11294         write_csr(dd, addr, reg);
11295 }
11296
11297 /* set the given per-VL dedicated limit */
11298 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11299 {
11300         u64 reg;
11301         u32 addr;
11302
11303         if (vl < TXE_NUM_DATA_VL)
11304                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11305         else
11306                 addr = SEND_CM_CREDIT_VL15;
11307
11308         reg = read_csr(dd, addr);
11309         reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11310         reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11311         write_csr(dd, addr, reg);
11312 }
11313
11314 /* spin until the given per-VL status mask bits clear */
11315 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11316                                      const char *which)
11317 {
11318         unsigned long timeout;
11319         u64 reg;
11320
11321         timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11322         while (1) {
11323                 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11324
11325                 if (reg == 0)
11326                         return; /* success */
11327                 if (time_after(jiffies, timeout))
11328                         break;          /* timed out */
11329                 udelay(1);
11330         }
11331
11332         dd_dev_err(dd,
11333                    "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11334                    which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11335         /*
11336          * If this occurs, it is likely there was a credit loss on the link.
11337          * The only recovery from that is a link bounce.
11338          */
11339         dd_dev_err(dd,
11340                    "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11341 }
11342
11343 /*
11344  * The number of credits on the VLs may be changed while everything
11345  * is "live", but the following algorithm must be followed due to
11346  * how the hardware is actually implemented.  In particular,
11347  * Return_Credit_Status[] is the only correct status check.
11348  *
11349  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11350  *     set Global_Shared_Credit_Limit = 0
11351  *     use_all_vl = 1
11352  * mask0 = all VLs that are changing either dedicated or shared limits
11353  * set Shared_Limit[mask0] = 0
11354  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11355  * if (changing any dedicated limit)
11356  *     mask1 = all VLs that are lowering dedicated limits
11357  *     lower Dedicated_Limit[mask1]
11358  *     spin until Return_Credit_Status[mask1] == 0
11359  *     raise Dedicated_Limits
11360  * raise Shared_Limits
11361  * raise Global_Shared_Credit_Limit
11362  *
11363  * lower = if the new limit is lower, set the limit to the new value
11364  * raise = if the new limit is higher than the current value (may be changed
11365  *      earlier in the algorithm), set the limit to the new value
11366  */
11367 int set_buffer_control(struct hfi1_pportdata *ppd,
11368                        struct buffer_control *new_bc)
11369 {
11370         struct hfi1_devdata *dd = ppd->dd;
11371         u64 changing_mask, ld_mask, stat_mask;
11372         int change_count;
11373         int i, use_all_mask;
11374         int this_shared_changing;
11375         int vl_count = 0, ret;
11376         /*
11377          * A0: add the variable any_shared_limit_changing below and in the
11378          * algorithm above.  If removing A0 support, it can be removed.
11379          */
11380         int any_shared_limit_changing;
11381         struct buffer_control cur_bc;
11382         u8 changing[OPA_MAX_VLS];
11383         u8 lowering_dedicated[OPA_MAX_VLS];
11384         u16 cur_total;
11385         u32 new_total = 0;
11386         const u64 all_mask =
11387         SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11388          | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11389          | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11390          | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11391          | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11392          | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11393          | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11394          | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11395          | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11396
11397 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11398 #define NUM_USABLE_VLS 16       /* look at VL15 and less */
11399
11400         /* find the new total credits, do sanity check on unused VLs */
11401         for (i = 0; i < OPA_MAX_VLS; i++) {
11402                 if (valid_vl(i)) {
11403                         new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11404                         continue;
11405                 }
11406                 nonzero_msg(dd, i, "dedicated",
11407                             be16_to_cpu(new_bc->vl[i].dedicated));
11408                 nonzero_msg(dd, i, "shared",
11409                             be16_to_cpu(new_bc->vl[i].shared));
11410                 new_bc->vl[i].dedicated = 0;
11411                 new_bc->vl[i].shared = 0;
11412         }
11413         new_total += be16_to_cpu(new_bc->overall_shared_limit);
11414
11415         /* fetch the current values */
11416         get_buffer_control(dd, &cur_bc, &cur_total);
11417
11418         /*
11419          * Create the masks we will use.
11420          */
11421         memset(changing, 0, sizeof(changing));
11422         memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11423         /*
11424          * NOTE: Assumes that the individual VL bits are adjacent and in
11425          * increasing order
11426          */
11427         stat_mask =
11428                 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11429         changing_mask = 0;
11430         ld_mask = 0;
11431         change_count = 0;
11432         any_shared_limit_changing = 0;
11433         for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11434                 if (!valid_vl(i))
11435                         continue;
11436                 this_shared_changing = new_bc->vl[i].shared
11437                                                 != cur_bc.vl[i].shared;
11438                 if (this_shared_changing)
11439                         any_shared_limit_changing = 1;
11440                 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11441                     this_shared_changing) {
11442                         changing[i] = 1;
11443                         changing_mask |= stat_mask;
11444                         change_count++;
11445                 }
11446                 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11447                                         be16_to_cpu(cur_bc.vl[i].dedicated)) {
11448                         lowering_dedicated[i] = 1;
11449                         ld_mask |= stat_mask;
11450                 }
11451         }
11452
11453         /* bracket the credit change with a total adjustment */
11454         if (new_total > cur_total)
11455                 set_global_limit(dd, new_total);
11456
11457         /*
11458          * Start the credit change algorithm.
11459          */
11460         use_all_mask = 0;
11461         if ((be16_to_cpu(new_bc->overall_shared_limit) <
11462              be16_to_cpu(cur_bc.overall_shared_limit)) ||
11463             (is_ax(dd) && any_shared_limit_changing)) {
11464                 set_global_shared(dd, 0);
11465                 cur_bc.overall_shared_limit = 0;
11466                 use_all_mask = 1;
11467         }
11468
11469         for (i = 0; i < NUM_USABLE_VLS; i++) {
11470                 if (!valid_vl(i))
11471                         continue;
11472
11473                 if (changing[i]) {
11474                         set_vl_shared(dd, i, 0);
11475                         cur_bc.vl[i].shared = 0;
11476                 }
11477         }
11478
11479         wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11480                                  "shared");
11481
11482         if (change_count > 0) {
11483                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11484                         if (!valid_vl(i))
11485                                 continue;
11486
11487                         if (lowering_dedicated[i]) {
11488                                 set_vl_dedicated(dd, i,
11489                                                  be16_to_cpu(new_bc->
11490                                                              vl[i].dedicated));
11491                                 cur_bc.vl[i].dedicated =
11492                                                 new_bc->vl[i].dedicated;
11493                         }
11494                 }
11495
11496                 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11497
11498                 /* now raise all dedicated that are going up */
11499                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11500                         if (!valid_vl(i))
11501                                 continue;
11502
11503                         if (be16_to_cpu(new_bc->vl[i].dedicated) >
11504                                         be16_to_cpu(cur_bc.vl[i].dedicated))
11505                                 set_vl_dedicated(dd, i,
11506                                                  be16_to_cpu(new_bc->
11507                                                              vl[i].dedicated));
11508                 }
11509         }
11510
11511         /* next raise all shared that are going up */
11512         for (i = 0; i < NUM_USABLE_VLS; i++) {
11513                 if (!valid_vl(i))
11514                         continue;
11515
11516                 if (be16_to_cpu(new_bc->vl[i].shared) >
11517                                 be16_to_cpu(cur_bc.vl[i].shared))
11518                         set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11519         }
11520
11521         /* finally raise the global shared */
11522         if (be16_to_cpu(new_bc->overall_shared_limit) >
11523             be16_to_cpu(cur_bc.overall_shared_limit))
11524                 set_global_shared(dd,
11525                                   be16_to_cpu(new_bc->overall_shared_limit));
11526
11527         /* bracket the credit change with a total adjustment */
11528         if (new_total < cur_total)
11529                 set_global_limit(dd, new_total);
11530
11531         /*
11532          * Determine the actual number of operational VLs using the number of
11533          * dedicated and shared credits for each VL.
11534          */
11535         if (change_count > 0) {
11536                 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11537                         if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11538                             be16_to_cpu(new_bc->vl[i].shared) > 0)
11539                                 vl_count++;
11540                 ppd->actual_vls_operational = vl_count;
11541                 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11542                                     ppd->actual_vls_operational :
11543                                     ppd->vls_operational,
11544                                     NULL);
11545                 if (ret == 0)
11546                         ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11547                                            ppd->actual_vls_operational :
11548                                            ppd->vls_operational, NULL);
11549                 if (ret)
11550                         return ret;
11551         }
11552         return 0;
11553 }
11554
11555 /*
11556  * Read the given fabric manager table. Return the size of the
11557  * table (in bytes) on success, and a negative error code on
11558  * failure.
11559  */
11560 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11562 {
11563         int size;
11564         struct vl_arb_cache *vlc;
11565
11566         switch (which) {
11567         case FM_TBL_VL_HIGH_ARB:
11568                 size = 256;
11569                 /*
11570                  * OPA specifies 128 elements (of 2 bytes each), though
11571                  * HFI supports only 16 elements in h/w.
11572                  */
11573                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11574                 vl_arb_get_cache(vlc, t);
11575                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11576                 break;
11577         case FM_TBL_VL_LOW_ARB:
11578                 size = 256;
11579                 /*
11580                  * OPA specifies 128 elements (of 2 bytes each), though
11581                  * HFI supports only 16 elements in h/w.
11582                  */
11583                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11584                 vl_arb_get_cache(vlc, t);
11585                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11586                 break;
11587         case FM_TBL_BUFFER_CONTROL:
11588                 size = get_buffer_control(ppd->dd, t, NULL);
11589                 break;
11590         case FM_TBL_SC2VLNT:
11591                 size = get_sc2vlnt(ppd->dd, t);
11592                 break;
11593         case FM_TBL_VL_PREEMPT_ELEMS:
11594                 size = 256;
11595                 /* OPA specifies 128 elements, of 2 bytes each */
11596                 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11597                 break;
11598         case FM_TBL_VL_PREEMPT_MATRIX:
11599                 size = 256;
11600                 /*
11601                  * OPA specifies that this is the same size as the VL
11602                  * arbitration tables (i.e., 256 bytes).
11603                  */
11604                 break;
11605         default:
11606                 return -EINVAL;
11607         }
11608         return size;
11609 }
11610
11611 /*
11612  * Write the given fabric manager table.
11613  */
11614 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11615 {
11616         int ret = 0;
11617         struct vl_arb_cache *vlc;
11618
11619         switch (which) {
11620         case FM_TBL_VL_HIGH_ARB:
11621                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11622                 if (vl_arb_match_cache(vlc, t)) {
11623                         vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11624                         break;
11625                 }
11626                 vl_arb_set_cache(vlc, t);
11627                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11628                 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11629                                      VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11630                 break;
11631         case FM_TBL_VL_LOW_ARB:
11632                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11633                 if (vl_arb_match_cache(vlc, t)) {
11634                         vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11635                         break;
11636                 }
11637                 vl_arb_set_cache(vlc, t);
11638                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11639                 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11640                                      VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11641                 break;
11642         case FM_TBL_BUFFER_CONTROL:
11643                 ret = set_buffer_control(ppd, t);
11644                 break;
11645         case FM_TBL_SC2VLNT:
11646                 set_sc2vlnt(ppd->dd, t);
11647                 break;
11648         default:
11649                 ret = -EINVAL;
11650         }
11651         return ret;
11652 }
11653
11654 /*
11655  * Disable all data VLs.
11656  *
11657  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11658  */
11659 static int disable_data_vls(struct hfi1_devdata *dd)
11660 {
11661         if (is_ax(dd))
11662                 return 1;
11663
11664         pio_send_control(dd, PSC_DATA_VL_DISABLE);
11665
11666         return 0;
11667 }
11668
11669 /*
11670  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11671  * Just re-enables all data VLs (the "fill" part happens
11672  * automatically - the name was chosen for symmetry with
11673  * stop_drain_data_vls()).
11674  *
11675  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11676  */
11677 int open_fill_data_vls(struct hfi1_devdata *dd)
11678 {
11679         if (is_ax(dd))
11680                 return 1;
11681
11682         pio_send_control(dd, PSC_DATA_VL_ENABLE);
11683
11684         return 0;
11685 }
11686
11687 /*
11688  * drain_data_vls() - assumes that disable_data_vls() has been called;
11689  * waits for the occupancy of the per-VL FIFOs (for all contexts) and of
11690  * the SDMA engines to drop to 0.
11691  */
11692 static void drain_data_vls(struct hfi1_devdata *dd)
11693 {
11694         sc_wait(dd);
11695         sdma_wait(dd);
11696         pause_for_credit_return(dd);
11697 }
11698
11699 /*
11700  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11701  *
11702  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11703  * meant to be used like this:
11704  *
11705  * stop_drain_data_vls(dd);
11706  * // do things with per-VL resources
11707  * open_fill_data_vls(dd);
11708  */
11709 int stop_drain_data_vls(struct hfi1_devdata *dd)
11710 {
11711         int ret;
11712
11713         ret = disable_data_vls(dd);
11714         if (ret == 0)
11715                 drain_data_vls(dd);
11716
11717         return ret;
11718 }
11719
11720 /*
11721  * Convert a nanosecond time to a cclock count.  No matter how slow
11722  * the cclock, a non-zero ns will always have a non-zero result.
11723  */
11724 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11725 {
11726         u32 cclocks;
11727
11728         if (dd->icode == ICODE_FPGA_EMULATION)
11729                 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11730         else  /* simulation pretends to be ASIC */
11731                 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11732         if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11733                 cclocks = 1;
11734         return cclocks;
11735 }
11736
11737 /*
11738  * Convert a cclock count to nanoseconds. No matter how slow
11739  * the cclock, a non-zero cclocks will always have a non-zero result.
11740  */
11741 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11742 {
11743         u32 ns;
11744
11745         if (dd->icode == ICODE_FPGA_EMULATION)
11746                 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11747         else  /* simulation pretends to be ASIC */
11748                 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11749         if (cclocks && !ns)
11750                 ns = 1;
11751         return ns;
11752 }
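
/*
 * A worked example of the two conversions above, using a hypothetical
 * 800 ps cclock period (not necessarily ASIC_CCLOCK_PS or FPGA_CCLOCK_PS):
 *   ns_to_cclock(dd, 5)  => (5 * 1000) / 800  = 6 cclocks
 *   cclock_to_ns(dd, 6)  => (6 * 800) / 1000  = 4 ns
 * The round trip is inexact because of integer truncation, but neither
 * direction ever turns a non-zero input into a zero result.
 */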
11753
11754 /*
11755  * Dynamically adjust the receive interrupt timeout for a context based on
11756  * incoming packet rate.
11757  *
11758  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11759  */
11760 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11761 {
11762         struct hfi1_devdata *dd = rcd->dd;
11763         u32 timeout = rcd->rcvavail_timeout;
11764
11765         /*
11766          * This algorithm doubles or halves the timeout depending on whether
11767          * the number of packets received in this interrupt was less than or
11768          * greater than or equal to the interrupt count.
11769          *
11770          * The calculations below do not allow a steady state to be achieved.
11771          * Only at the endpoints is it possible to have an unchanging
11772          * timeout.
11773          */
11774         if (npkts < rcv_intr_count) {
11775                 /*
11776                  * Not enough packets arrived before the timeout, adjust
11777                  * timeout downward.
11778                  */
11779                 if (timeout < 2) /* already at minimum? */
11780                         return;
11781                 timeout >>= 1;
11782         } else {
11783                 /*
11784                  * More than enough packets arrived before the timeout, adjust
11785                  * timeout upward.
11786                  */
11787                 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11788                         return;
11789                 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11790         }
11791
11792         rcd->rcvavail_timeout = timeout;
11793         /*
11794          * timeout cannot be larger than rcv_intr_timeout_csr which has already
11795          * been verified to be in range
11796          */
11797         write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11798                         (u64)timeout <<
11799                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11800 }
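
/*
 * Illustrative trace of adjust_rcv_timeout() with assumed numbers (not
 * taken from hardware): with rcv_intr_count = 16, rcvavail_timeout = 64,
 * and rcv_intr_timeout_csr >= 128, an interrupt that handled 8 packets
 * halves the timeout to 32, while a later interrupt that handled 20
 * packets doubles it back to 64.  The timeout never exceeds
 * rcv_intr_timeout_csr and never drops below 1.
 */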
11801
11802 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11803                     u32 intr_adjust, u32 npkts)
11804 {
11805         struct hfi1_devdata *dd = rcd->dd;
11806         u64 reg;
11807         u32 ctxt = rcd->ctxt;
11808
11809         /*
11810          * Need to write timeout register before updating RcvHdrHead to ensure
11811          * that a new value is used when the HW decides to restart counting.
11812          */
11813         if (intr_adjust)
11814                 adjust_rcv_timeout(rcd, npkts);
11815         if (updegr) {
11816                 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11817                         << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11818                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11819         }
11820         mmiowb();
11821         reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11822                 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11823                         << RCV_HDR_HEAD_HEAD_SHIFT);
11824         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11825         mmiowb();
11826 }
11827
11828 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11829 {
11830         u32 head, tail;
11831
11832         head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11833                 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11834
11835         if (rcd->rcvhdrtail_kvaddr)
11836                 tail = get_rcvhdrtail(rcd);
11837         else
11838                 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11839
11840         return head == tail;
11841 }
11842
11843 /*
11844  * Context Control and Receive Array encoding for buffer size:
11845  *      0x0 invalid
11846  *      0x1   4 KB
11847  *      0x2   8 KB
11848  *      0x3  16 KB
11849  *      0x4  32 KB
11850  *      0x5  64 KB
11851  *      0x6 128 KB
11852  *      0x7 256 KB
11853  *      0x8 512 KB (Receive Array only)
11854  *      0x9   1 MB (Receive Array only)
11855  *      0xa   2 MB (Receive Array only)
11856  *
11857  *      0xB-0xF - reserved (Receive Array only)
11858  *
11859  *
11860  * This routine assumes that the value has already been sanity checked.
11861  */
11862 static u32 encoded_size(u32 size)
11863 {
11864         switch (size) {
11865         case   4 * 1024: return 0x1;
11866         case   8 * 1024: return 0x2;
11867         case  16 * 1024: return 0x3;
11868         case  32 * 1024: return 0x4;
11869         case  64 * 1024: return 0x5;
11870         case 128 * 1024: return 0x6;
11871         case 256 * 1024: return 0x7;
11872         case 512 * 1024: return 0x8;
11873         case   1 * 1024 * 1024: return 0x9;
11874         case   2 * 1024 * 1024: return 0xa;
11875         }
11876         return 0x1;     /* if invalid, go with the minimum size */
11877 }
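
/*
 * For example, encoded_size(64 * 1024) returns 0x5, and any size not in
 * the table above falls back to 0x1 (4 KB).
 */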
11878
11879 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11880                   struct hfi1_ctxtdata *rcd)
11881 {
11882         u64 rcvctrl, reg;
11883         int did_enable = 0;
11884         u16 ctxt;
11885
11886         if (!rcd)
11887                 return;
11888
11889         ctxt = rcd->ctxt;
11890
11891         hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11892
11893         rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11894         /* if the context is already enabled, don't do the extra steps */
11895         if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11896             !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11897                 /* reset the tail and hdr addresses, and sequence count */
11898                 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11899                                 rcd->rcvhdrq_dma);
11900                 if (rcd->rcvhdrtail_kvaddr)
11901                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11902                                         rcd->rcvhdrqtailaddr_dma);
11903                 rcd->seq_cnt = 1;
11904
11905                 /* reset the cached receive header queue head value */
11906                 rcd->head = 0;
11907
11908                 /*
11909                  * Zero the receive header queue so we don't get false
11910                  * positives when checking the sequence number.  The
11911                  * sequence numbers could land exactly on the same spot.
11912                  * E.g. an rcd restart before the receive header queue wrapped.
11913                  */
11914                 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
11915
11916                 /* starting timeout */
11917                 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11918
11919                 /* enable the context */
11920                 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11921
11922                 /* clean the egr buffer size first */
11923                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11924                 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11925                                 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11926                                         << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11927
11928                 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11929                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11930                 did_enable = 1;
11931
11932                 /* zero RcvEgrIndexHead */
11933                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11934
11935                 /* set eager count and base index */
11936                 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11937                         & RCV_EGR_CTRL_EGR_CNT_MASK)
11938                        << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11939                         (((rcd->eager_base >> RCV_SHIFT)
11940                           & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11941                          << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11942                 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11943
11944                 /*
11945                  * Set TID (expected) count and base index.
11946                  * rcd->expected_count is set to individual RcvArray entries,
11947                  * not pairs, and the CSR takes a pair-count in groups of
11948                  * four, so divide by 8.
11949                  */
11950                 reg = (((rcd->expected_count >> RCV_SHIFT)
11951                                         & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11952                                 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11953                       (((rcd->expected_base >> RCV_SHIFT)
11954                                         & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11955                                 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11956                 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11957                 if (ctxt == HFI1_CTRL_CTXT)
11958                         write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11959         }
11960         if (op & HFI1_RCVCTRL_CTXT_DIS) {
11961                 write_csr(dd, RCV_VL15, 0);
11962                 /*
11963                  * When a receive context is being disabled, turn on the tail
11964                  * update with a dummy tail address and then disable the
11965                  * receive context.
11966                  */
11967                 if (dd->rcvhdrtail_dummy_dma) {
11968                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11969                                         dd->rcvhdrtail_dummy_dma);
11970                         /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11971                         rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11972                 }
11973
11974                 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11975         }
11976         if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11977                 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11978         if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11979                 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11980         if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11981                 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11982         if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11983                 /* See comment on RcvCtxtCtrl.TailUpd above */
11984                 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11985                         rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11986         }
11987         if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11988                 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11989         if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11990                 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11991         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11992                 /*
11993                  * In one-packet-per-eager mode, the size comes from
11994                  * the RcvArray entry.
11995                  */
11996                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11997                 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11998         }
11999         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
12000                 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12001         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
12002                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12003         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
12004                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12005         if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
12006                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12007         if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
12008                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12009         hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12010         write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12011
12012         /* work around sticky RcvCtxtStatus.BlockedRHQFull */
12013         if (did_enable &&
12014             (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12015                 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12016                 if (reg != 0) {
12017                         dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12018                                     ctxt, reg);
12019                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12020                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12021                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12022                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12023                         reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12024                         dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12025                                     ctxt, reg, reg == 0 ? "not" : "still");
12026                 }
12027         }
12028
12029         if (did_enable) {
12030                 /*
12031                  * The interrupt timeout and count must be set after
12032                  * the context is enabled to take effect.
12033                  */
12034                 /* set interrupt timeout */
12035                 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12036                                 (u64)rcd->rcvavail_timeout <<
12037                                 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12038
12039                 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12040                 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12041                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12042         }
12043
12044         if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12045                 /*
12046                  * If the context has been disabled and the Tail Update has
12047                  * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
12048                  * so it doesn't contain an invalid address.
12049                  */
12050                 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12051                                 dd->rcvhdrtail_dummy_dma);
12052 }
12053
12054 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12055 {
12056         int ret;
12057         u64 val = 0;
12058
12059         if (namep) {
12060                 ret = dd->cntrnameslen;
12061                 *namep = dd->cntrnames;
12062         } else {
12063                 const struct cntr_entry *entry;
12064                 int i, j;
12065
12066                 ret = (dd->ndevcntrs) * sizeof(u64);
12067
12068                 /* Get the start of the block of counters */
12069                 *cntrp = dd->cntrs;
12070
12071                 /*
12072                  * Now go and fill in each counter in the block.
12073                  */
12074                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12075                         entry = &dev_cntrs[i];
12076                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12077                         if (entry->flags & CNTR_DISABLED) {
12078                                 /* Nothing */
12079                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12080                         } else {
12081                                 if (entry->flags & CNTR_VL) {
12082                                         hfi1_cdbg(CNTR, "\tPer VL\n");
12083                                         for (j = 0; j < C_VL_COUNT; j++) {
12084                                                 val = entry->rw_cntr(entry,
12085                                                                   dd, j,
12086                                                                   CNTR_MODE_R,
12087                                                                   0);
12088                                                 hfi1_cdbg(
12089                                                    CNTR,
12090                                                    "\t\tRead 0x%llx for %d\n",
12091                                                    val, j);
12092                                                 dd->cntrs[entry->offset + j] =
12093                                                                             val;
12094                                         }
12095                                 } else if (entry->flags & CNTR_SDMA) {
12096                                         hfi1_cdbg(CNTR,
12097                                                   "\t Per SDMA Engine\n");
12098                                         for (j = 0; j < chip_sdma_engines(dd);
12099                                              j++) {
12100                                                 val =
12101                                                 entry->rw_cntr(entry, dd, j,
12102                                                                CNTR_MODE_R, 0);
12103                                                 hfi1_cdbg(CNTR,
12104                                                           "\t\tRead 0x%llx for %d\n",
12105                                                           val, j);
12106                                                 dd->cntrs[entry->offset + j] =
12107                                                                         val;
12108                                         }
12109                                 } else {
12110                                         val = entry->rw_cntr(entry, dd,
12111                                                         CNTR_INVALID_VL,
12112                                                         CNTR_MODE_R, 0);
12113                                         dd->cntrs[entry->offset] = val;
12114                                         hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12115                                 }
12116                         }
12117                 }
12118         }
12119         return ret;
12120 }
12121
12122 /*
12123  * Used by sysfs to create files for reading hfi stats
12124  */
12125 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12126 {
12127         int ret;
12128         u64 val = 0;
12129
12130         if (namep) {
12131                 ret = ppd->dd->portcntrnameslen;
12132                 *namep = ppd->dd->portcntrnames;
12133         } else {
12134                 const struct cntr_entry *entry;
12135                 int i, j;
12136
12137                 ret = ppd->dd->nportcntrs * sizeof(u64);
12138                 *cntrp = ppd->cntrs;
12139
12140                 for (i = 0; i < PORT_CNTR_LAST; i++) {
12141                         entry = &port_cntrs[i];
12142                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12143                         if (entry->flags & CNTR_DISABLED) {
12144                                 /* Nothing */
12145                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12146                                 continue;
12147                         }
12148
12149                         if (entry->flags & CNTR_VL) {
12150                                 hfi1_cdbg(CNTR, "\tPer VL");
12151                                 for (j = 0; j < C_VL_COUNT; j++) {
12152                                         val = entry->rw_cntr(entry, ppd, j,
12153                                                                CNTR_MODE_R,
12154                                                                0);
12155                                         hfi1_cdbg(
12156                                            CNTR,
12157                                            "\t\tRead 0x%llx for %d",
12158                                            val, j);
12159                                         ppd->cntrs[entry->offset + j] = val;
12160                                 }
12161                         } else {
12162                                 val = entry->rw_cntr(entry, ppd,
12163                                                        CNTR_INVALID_VL,
12164                                                        CNTR_MODE_R,
12165                                                        0);
12166                                 ppd->cntrs[entry->offset] = val;
12167                                 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12168                         }
12169                 }
12170         }
12171         return ret;
12172 }
12173
12174 static void free_cntrs(struct hfi1_devdata *dd)
12175 {
12176         struct hfi1_pportdata *ppd;
12177         int i;
12178
12179         if (dd->synth_stats_timer.function)
12180                 del_timer_sync(&dd->synth_stats_timer);
12181         ppd = (struct hfi1_pportdata *)(dd + 1);
12182         for (i = 0; i < dd->num_pports; i++, ppd++) {
12183                 kfree(ppd->cntrs);
12184                 kfree(ppd->scntrs);
12185                 free_percpu(ppd->ibport_data.rvp.rc_acks);
12186                 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12187                 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12188                 ppd->cntrs = NULL;
12189                 ppd->scntrs = NULL;
12190                 ppd->ibport_data.rvp.rc_acks = NULL;
12191                 ppd->ibport_data.rvp.rc_qacks = NULL;
12192                 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12193         }
12194         kfree(dd->portcntrnames);
12195         dd->portcntrnames = NULL;
12196         kfree(dd->cntrs);
12197         dd->cntrs = NULL;
12198         kfree(dd->scntrs);
12199         dd->scntrs = NULL;
12200         kfree(dd->cntrnames);
12201         dd->cntrnames = NULL;
12202         if (dd->update_cntr_wq) {
12203                 destroy_workqueue(dd->update_cntr_wq);
12204                 dd->update_cntr_wq = NULL;
12205         }
12206 }
12207
12208 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12209                               u64 *psval, void *context, int vl)
12210 {
12211         u64 val;
12212         u64 sval = *psval;
12213
12214         if (entry->flags & CNTR_DISABLED) {
12215                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12216                 return 0;
12217         }
12218
12219         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12220
12221         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12222
12223         /* If it's a synthetic counter there is more work we need to do */
12224         if (entry->flags & CNTR_SYNTH) {
12225                 if (sval == CNTR_MAX) {
12226                         /* No need to read; already saturated */
12227                         return CNTR_MAX;
12228                 }
12229
12230                 if (entry->flags & CNTR_32BIT) {
12231                         /* 32bit counters can wrap multiple times */
12232                         u64 upper = sval >> 32;
12233                         u64 lower = (sval << 32) >> 32;
12234
12235                         if (lower > val) { /* hw wrapped */
12236                                 if (upper == CNTR_32BIT_MAX)
12237                                         val = CNTR_MAX;
12238                                 else
12239                                         upper++;
12240                         }
12241
12242                         if (val != CNTR_MAX)
12243                                 val = (upper << 32) | val;
12244
12245                 } else {
12246                         /* If we rolled we are saturated */
12247                         if ((val < sval) || (val > CNTR_MAX))
12248                                 val = CNTR_MAX;
12249                 }
12250         }
12251
12252         *psval = val;
12253
12254         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12255
12256         return val;
12257 }
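
/*
 * Worked example of the 32-bit wrap handling above, with hypothetical
 * values: a saved software value sval = 0x1FFFFFFF0 (upper = 0x1,
 * lower = 0xFFFFFFF0) and a fresh hardware read val = 0x10 means
 * lower > val, so the hardware counter wrapped; upper is bumped to 0x2
 * and the value saved and returned is (0x2 << 32) | 0x10 = 0x200000010.
 */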
12258
12259 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12260                                struct cntr_entry *entry,
12261                                u64 *psval, void *context, int vl, u64 data)
12262 {
12263         u64 val;
12264
12265         if (entry->flags & CNTR_DISABLED) {
12266                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12267                 return 0;
12268         }
12269
12270         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12271
12272         if (entry->flags & CNTR_SYNTH) {
12273                 *psval = data;
12274                 if (entry->flags & CNTR_32BIT) {
12275                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12276                                              (data << 32) >> 32);
12277                         val = data; /* return the full 64bit value */
12278                 } else {
12279                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12280                                              data);
12281                 }
12282         } else {
12283                 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12284         }
12285
12286         *psval = val;
12287
12288         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12289
12290         return val;
12291 }
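
/*
 * Example of a synthetic 32-bit counter write, with an assumed value:
 * writing data = 0x100000005 stores the full 64-bit value in *psval,
 * pushes only the low 32 bits (0x5) into the hardware register, and
 * returns 0x100000005 so the caller sees the complete software value.
 */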
12292
12293 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12294 {
12295         struct cntr_entry *entry;
12296         u64 *sval;
12297
12298         entry = &dev_cntrs[index];
12299         sval = dd->scntrs + entry->offset;
12300
12301         if (vl != CNTR_INVALID_VL)
12302                 sval += vl;
12303
12304         return read_dev_port_cntr(dd, entry, sval, dd, vl);
12305 }
12306
12307 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12308 {
12309         struct cntr_entry *entry;
12310         u64 *sval;
12311
12312         entry = &dev_cntrs[index];
12313         sval = dd->scntrs + entry->offset;
12314
12315         if (vl != CNTR_INVALID_VL)
12316                 sval += vl;
12317
12318         return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12319 }
12320
12321 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12322 {
12323         struct cntr_entry *entry;
12324         u64 *sval;
12325
12326         entry = &port_cntrs[index];
12327         sval = ppd->scntrs + entry->offset;
12328
12329         if (vl != CNTR_INVALID_VL)
12330                 sval += vl;
12331
12332         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12333             (index <= C_RCV_HDR_OVF_LAST)) {
12334                 /* We do not want to bother for disabled contexts */
12335                 return 0;
12336         }
12337
12338         return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12339 }
12340
12341 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12342 {
12343         struct cntr_entry *entry;
12344         u64 *sval;
12345
12346         entry = &port_cntrs[index];
12347         sval = ppd->scntrs + entry->offset;
12348
12349         if (vl != CNTR_INVALID_VL)
12350                 sval += vl;
12351
12352         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12353             (index <= C_RCV_HDR_OVF_LAST)) {
12354                 /* We do not want to bother for disabled contexts */
12355                 return 0;
12356         }
12357
12358         return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12359 }
12360
12361 static void do_update_synth_timer(struct work_struct *work)
12362 {
12363         u64 cur_tx;
12364         u64 cur_rx;
12365         u64 total_flits;
12366         u8 update = 0;
12367         int i, j, vl;
12368         struct hfi1_pportdata *ppd;
12369         struct cntr_entry *entry;
12370         struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12371                                                update_cntr_work);
12372
12373         /*
12374          * Rather than keep beating on the CSRs, pick a minimal set that we can
12375          * check to watch for potential rollover. We can do this by looking at
12376          * the number of flits sent/received. If the total flits exceeds 32 bits
12377          * then we have to iterate over all the counters and update.
12378          */
12379         entry = &dev_cntrs[C_DC_RCV_FLITS];
12380         cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12381
12382         entry = &dev_cntrs[C_DC_XMIT_FLITS];
12383         cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12384
12385         hfi1_cdbg(
12386             CNTR,
12387             "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12388             dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12389
12390         if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12391                 /*
12392                  * May not be strictly necessary to update but it won't hurt and
12393                  * simplifies the logic here.
12394                  */
12395                 update = 1;
12396                 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12397                           dd->unit);
12398         } else {
12399                 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12400                 hfi1_cdbg(CNTR,
12401                           "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12402                           total_flits, (u64)CNTR_32BIT_MAX);
12403                 if (total_flits >= CNTR_32BIT_MAX) {
12404                         hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12405                                   dd->unit);
12406                         update = 1;
12407                 }
12408         }
12409
12410         if (update) {
12411                 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12412                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12413                         entry = &dev_cntrs[i];
12414                         if (entry->flags & CNTR_VL) {
12415                                 for (vl = 0; vl < C_VL_COUNT; vl++)
12416                                         read_dev_cntr(dd, i, vl);
12417                         } else {
12418                                 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12419                         }
12420                 }
12421                 ppd = (struct hfi1_pportdata *)(dd + 1);
12422                 for (i = 0; i < dd->num_pports; i++, ppd++) {
12423                         for (j = 0; j < PORT_CNTR_LAST; j++) {
12424                                 entry = &port_cntrs[j];
12425                                 if (entry->flags & CNTR_VL) {
12426                                         for (vl = 0; vl < C_VL_COUNT; vl++)
12427                                                 read_port_cntr(ppd, j, vl);
12428                                 } else {
12429                                         read_port_cntr(ppd, j, CNTR_INVALID_VL);
12430                                 }
12431                         }
12432                 }
12433
12434                 /*
12435                  * We want the value in the register. The goal is to keep track
12436                  * of the number of "ticks" not the counter value. In other
12437                  * words if the register rolls we want to notice it and go ahead
12438                  * and force an update.
12439                  */
12440                 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12441                 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12442                                                 CNTR_MODE_R, 0);
12443
12444                 entry = &dev_cntrs[C_DC_RCV_FLITS];
12445                 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12446                                                 CNTR_MODE_R, 0);
12447
12448                 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12449                           dd->unit, dd->last_tx, dd->last_rx);
12450
12451         } else {
12452                 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12453         }
12454 }
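
/*
 * Worked example of the tripwire check above, with illustrative numbers:
 * if the previous snapshot was last_tx = 0x1000 and last_rx = 0x2000 and
 * the current reads are cur_tx = 0x90001000 and cur_rx = 0x90002000, then
 * total_flits = 0x90000000 + 0x90000000 = 0x120000000, which exceeds
 * CNTR_32BIT_MAX and forces a full counter update before any 32-bit
 * hardware counter can wrap more than once unnoticed.
 */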
12455
12456 static void update_synth_timer(struct timer_list *t)
12457 {
12458         struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12459
12460         queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12461         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12462 }
12463
12464 #define C_MAX_NAME 16 /* 15 chars + one for \0 */
12465 static int init_cntrs(struct hfi1_devdata *dd)
12466 {
12467         int i, rcv_ctxts, j;
12468         size_t sz;
12469         char *p;
12470         char name[C_MAX_NAME];
12471         struct hfi1_pportdata *ppd;
12472         const char *bit_type_32 = ",32";
12473         const int bit_type_32_sz = strlen(bit_type_32);
12474         u32 sdma_engines = chip_sdma_engines(dd);
12475
12476         /* set up the stats timer; the add_timer is done at the end */
12477         timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12478
12479         /***********************/
12480         /* per device counters */
12481         /***********************/
12482
12483         /* size names and determine how many we have */
12484         dd->ndevcntrs = 0;
12485         sz = 0;
12486
12487         for (i = 0; i < DEV_CNTR_LAST; i++) {
12488                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12489                         hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12490                         continue;
12491                 }
12492
12493                 if (dev_cntrs[i].flags & CNTR_VL) {
12494                         dev_cntrs[i].offset = dd->ndevcntrs;
12495                         for (j = 0; j < C_VL_COUNT; j++) {
12496                                 snprintf(name, C_MAX_NAME, "%s%d",
12497                                          dev_cntrs[i].name, vl_from_idx(j));
12498                                 sz += strlen(name);
12499                                 /* Add ",32" for 32-bit counters */
12500                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12501                                         sz += bit_type_32_sz;
12502                                 sz++;
12503                                 dd->ndevcntrs++;
12504                         }
12505                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12506                         dev_cntrs[i].offset = dd->ndevcntrs;
12507                         for (j = 0; j < sdma_engines; j++) {
12508                                 snprintf(name, C_MAX_NAME, "%s%d",
12509                                          dev_cntrs[i].name, j);
12510                                 sz += strlen(name);
12511                                 /* Add ",32" for 32-bit counters */
12512                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12513                                         sz += bit_type_32_sz;
12514                                 sz++;
12515                                 dd->ndevcntrs++;
12516                         }
12517                 } else {
12518                         /* +1 for newline. */
12519                         sz += strlen(dev_cntrs[i].name) + 1;
12520                         /* Add ",32" for 32-bit counters */
12521                         if (dev_cntrs[i].flags & CNTR_32BIT)
12522                                 sz += bit_type_32_sz;
12523                         dev_cntrs[i].offset = dd->ndevcntrs;
12524                         dd->ndevcntrs++;
12525                 }
12526         }
12527
12528         /* allocate space for the counter values */
12529         dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12530                             GFP_KERNEL);
12531         if (!dd->cntrs)
12532                 goto bail;
12533
12534         dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12535         if (!dd->scntrs)
12536                 goto bail;
12537
12538         /* allocate space for the counter names */
12539         dd->cntrnameslen = sz;
12540         dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12541         if (!dd->cntrnames)
12542                 goto bail;
12543
12544         /* fill in the names */
12545         for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12546                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12547                         /* Nothing */
12548                 } else if (dev_cntrs[i].flags & CNTR_VL) {
12549                         for (j = 0; j < C_VL_COUNT; j++) {
12550                                 snprintf(name, C_MAX_NAME, "%s%d",
12551                                          dev_cntrs[i].name,
12552                                          vl_from_idx(j));
12553                                 memcpy(p, name, strlen(name));
12554                                 p += strlen(name);
12555
12556                                 /* Counter is 32 bits */
12557                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12558                                         memcpy(p, bit_type_32, bit_type_32_sz);
12559                                         p += bit_type_32_sz;
12560                                 }
12561
12562                                 *p++ = '\n';
12563                         }
12564                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12565                         for (j = 0; j < sdma_engines; j++) {
12566                                 snprintf(name, C_MAX_NAME, "%s%d",
12567                                          dev_cntrs[i].name, j);
12568                                 memcpy(p, name, strlen(name));
12569                                 p += strlen(name);
12570
12571                                 /* Counter is 32 bits */
12572                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12573                                         memcpy(p, bit_type_32, bit_type_32_sz);
12574                                         p += bit_type_32_sz;
12575                                 }
12576
12577                                 *p++ = '\n';
12578                         }
12579                 } else {
12580                         memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12581                         p += strlen(dev_cntrs[i].name);
12582
12583                         /* Counter is 32 bits */
12584                         if (dev_cntrs[i].flags & CNTR_32BIT) {
12585                                 memcpy(p, bit_type_32, bit_type_32_sz);
12586                                 p += bit_type_32_sz;
12587                         }
12588
12589                         *p++ = '\n';
12590                 }
12591         }
12592
12593         /*********************/
12594         /* per port counters */
12595         /*********************/
12596
12597         /*
12598          * Go through the counters for the overflows and disable the ones we
12599          * don't need. This varies based on platform so we need to do it
12600          * dynamically here.
12601          */
12602         rcv_ctxts = dd->num_rcv_contexts;
12603         for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12604              i <= C_RCV_HDR_OVF_LAST; i++) {
12605                 port_cntrs[i].flags |= CNTR_DISABLED;
12606         }
12607
12608         /* size port counter names and determine how many we have */
12609         sz = 0;
12610         dd->nportcntrs = 0;
12611         for (i = 0; i < PORT_CNTR_LAST; i++) {
12612                 if (port_cntrs[i].flags & CNTR_DISABLED) {
12613                         hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12614                         continue;
12615                 }
12616
12617                 if (port_cntrs[i].flags & CNTR_VL) {
12618                         port_cntrs[i].offset = dd->nportcntrs;
12619                         for (j = 0; j < C_VL_COUNT; j++) {
12620                                 snprintf(name, C_MAX_NAME, "%s%d",
12621                                          port_cntrs[i].name, vl_from_idx(j));
12622                                 sz += strlen(name);
12623                                 /* Add ",32" for 32-bit counters */
12624                                 if (port_cntrs[i].flags & CNTR_32BIT)
12625                                         sz += bit_type_32_sz;
12626                                 sz++;
12627                                 dd->nportcntrs++;
12628                         }
12629                 } else {
12630                         /* +1 for newline */
12631                         sz += strlen(port_cntrs[i].name) + 1;
12632                         /* Add ",32" for 32-bit counters */
12633                         if (port_cntrs[i].flags & CNTR_32BIT)
12634                                 sz += bit_type_32_sz;
12635                         port_cntrs[i].offset = dd->nportcntrs;
12636                         dd->nportcntrs++;
12637                 }
12638         }
12639
12640         /* allocate space for the counter names */
12641         dd->portcntrnameslen = sz;
12642         dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12643         if (!dd->portcntrnames)
12644                 goto bail;
12645
12646         /* fill in port cntr names */
12647         for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12648                 if (port_cntrs[i].flags & CNTR_DISABLED)
12649                         continue;
12650
12651                 if (port_cntrs[i].flags & CNTR_VL) {
12652                         for (j = 0; j < C_VL_COUNT; j++) {
12653                                 snprintf(name, C_MAX_NAME, "%s%d",
12654                                          port_cntrs[i].name, vl_from_idx(j));
12655                                 memcpy(p, name, strlen(name));
12656                                 p += strlen(name);
12657
12658                                 /* Counter is 32 bits */
12659                                 if (port_cntrs[i].flags & CNTR_32BIT) {
12660                                         memcpy(p, bit_type_32, bit_type_32_sz);
12661                                         p += bit_type_32_sz;
12662                                 }
12663
12664                                 *p++ = '\n';
12665                         }
12666                 } else {
12667                         memcpy(p, port_cntrs[i].name,
12668                                strlen(port_cntrs[i].name));
12669                         p += strlen(port_cntrs[i].name);
12670
12671                         /* Counter is 32 bits */
12672                         if (port_cntrs[i].flags & CNTR_32BIT) {
12673                                 memcpy(p, bit_type_32, bit_type_32_sz);
12674                                 p += bit_type_32_sz;
12675                         }
12676
12677                         *p++ = '\n';
12678                 }
12679         }
12680
12681         /* allocate per port storage for counter values */
12682         ppd = (struct hfi1_pportdata *)(dd + 1);
12683         for (i = 0; i < dd->num_pports; i++, ppd++) {
12684                 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12685                 if (!ppd->cntrs)
12686                         goto bail;
12687
12688                 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12689                 if (!ppd->scntrs)
12690                         goto bail;
12691         }
12692
12693         /* CPU counters need to be allocated and zeroed */
12694         if (init_cpu_counters(dd))
12695                 goto bail;
12696
12697         dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12698                                                      WQ_MEM_RECLAIM, dd->unit);
12699         if (!dd->update_cntr_wq)
12700                 goto bail;
12701
12702         INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12703
12704         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12705         return 0;
12706 bail:
12707         free_cntrs(dd);
12708         return -ENOMEM;
12709 }
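
/*
 * A minimal sketch of the dd->cntrnames layout built above, using made-up
 * counter names (the real names come from the dev_cntrs[] table): each
 * name is terminated by '\n', a ",32" suffix marks 32-bit counters, and
 * per-VL/per-SDMA counters are expanded with the index appended:
 *
 *   "TxWordsHypothetical\nRxDroppedHypothetical,32\nPerVlHypothetical0\n..."
 *
 * dd->cntrnameslen, sized in the first pass, accounts for exactly these
 * newlines and ",32" suffixes.
 */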
12710
12711 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12712 {
12713         switch (chip_lstate) {
12714         default:
12715                 dd_dev_err(dd,
12716                            "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12717                            chip_lstate);
12718                 /* fall through */
12719         case LSTATE_DOWN:
12720                 return IB_PORT_DOWN;
12721         case LSTATE_INIT:
12722                 return IB_PORT_INIT;
12723         case LSTATE_ARMED:
12724                 return IB_PORT_ARMED;
12725         case LSTATE_ACTIVE:
12726                 return IB_PORT_ACTIVE;
12727         }
12728 }
12729
12730 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12731 {
12732         /* look at the HFI meta-states only */
12733         switch (chip_pstate & 0xf0) {
12734         default:
12735                 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12736                            chip_pstate);
12737                 /* fall through */
12738         case PLS_DISABLED:
12739                 return IB_PORTPHYSSTATE_DISABLED;
12740         case PLS_OFFLINE:
12741                 return OPA_PORTPHYSSTATE_OFFLINE;
12742         case PLS_POLLING:
12743                 return IB_PORTPHYSSTATE_POLLING;
12744         case PLS_CONFIGPHY:
12745                 return IB_PORTPHYSSTATE_TRAINING;
12746         case PLS_LINKUP:
12747                 return IB_PORTPHYSSTATE_LINKUP;
12748         case PLS_PHYTEST:
12749                 return IB_PORTPHYSSTATE_PHY_TEST;
12750         }
12751 }
12752
12753 /* return the OPA port logical state name */
12754 const char *opa_lstate_name(u32 lstate)
12755 {
12756         static const char * const port_logical_names[] = {
12757                 "PORT_NOP",
12758                 "PORT_DOWN",
12759                 "PORT_INIT",
12760                 "PORT_ARMED",
12761                 "PORT_ACTIVE",
12762                 "PORT_ACTIVE_DEFER",
12763         };
12764         if (lstate < ARRAY_SIZE(port_logical_names))
12765                 return port_logical_names[lstate];
12766         return "unknown";
12767 }
12768
12769 /* return the OPA port physical state name */
12770 const char *opa_pstate_name(u32 pstate)
12771 {
12772         static const char * const port_physical_names[] = {
12773                 "PHYS_NOP",
12774                 "reserved1",
12775                 "PHYS_POLL",
12776                 "PHYS_DISABLED",
12777                 "PHYS_TRAINING",
12778                 "PHYS_LINKUP",
12779                 "PHYS_LINK_ERR_RECOVER",
12780                 "PHYS_PHY_TEST",
12781                 "reserved8",
12782                 "PHYS_OFFLINE",
12783                 "PHYS_GANGED",
12784                 "PHYS_TEST",
12785         };
12786         if (pstate < ARRAY_SIZE(port_physical_names))
12787                 return port_physical_names[pstate];
12788         return "unknown";
12789 }
12790
12791 /**
12792  * update_statusp - Update userspace status flag
12793  * @ppd: Port data structure
12794  * @state: port state information
12795  *
12796  * Actual port status is determined by the host_link_state value
12797  * in the ppd.
12798  *
12799  * host_link_state MUST be updated before updating the user space
12800  * statusp.
12801  */
12802 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12803 {
12804         /*
12805          * Set port status flags in the page mapped into userspace
12806          * memory. Do it here to ensure a reliable state - this is
12807          * the only function called by all state handling code.
12808          * Always set the flags because the cache value might
12809          * have been changed explicitly outside of this
12810          * function.
12811          */
12812         if (ppd->statusp) {
12813                 switch (state) {
12814                 case IB_PORT_DOWN:
12815                 case IB_PORT_INIT:
12816                         *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12817                                            HFI1_STATUS_IB_READY);
12818                         break;
12819                 case IB_PORT_ARMED:
12820                         *ppd->statusp |= HFI1_STATUS_IB_CONF;
12821                         break;
12822                 case IB_PORT_ACTIVE:
12823                         *ppd->statusp |= HFI1_STATUS_IB_READY;
12824                         break;
12825                 }
12826         }
12827         dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12828                     opa_lstate_name(state), state);
12829 }
12830
12831 /**
12832  * wait_logical_linkstate - wait for an IB link state change to occur
12833  * @ppd: port device
12834  * @state: the state to wait for
12835  * @msecs: the number of milliseconds to wait
12836  *
12837  * Wait up to msecs milliseconds for IB link state change to occur.
12838  * For now, take the easy polling route.
12839  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12840  */
12841 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12842                                   int msecs)
12843 {
12844         unsigned long timeout;
12845         u32 new_state;
12846
12847         timeout = jiffies + msecs_to_jiffies(msecs);
12848         while (1) {
12849                 new_state = chip_to_opa_lstate(ppd->dd,
12850                                                read_logical_state(ppd->dd));
12851                 if (new_state == state)
12852                         break;
12853                 if (time_after(jiffies, timeout)) {
12854                         dd_dev_err(ppd->dd,
12855                                    "timeout waiting for link state 0x%x\n",
12856                                    state);
12857                         return -ETIMEDOUT;
12858                 }
12859                 msleep(20);
12860         }
12861
12862         return 0;
12863 }
12864
12865 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12866 {
12867         u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12868
12869         dd_dev_info(ppd->dd,
12870                     "physical state changed to %s (0x%x), phy 0x%x\n",
12871                     opa_pstate_name(ib_pstate), ib_pstate, state);
12872 }
12873
12874 /*
12875  * Read the physical hardware link state and check if it matches the host
12876  * driver's anticipated state.
12877  */
12878 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12879 {
12880         u32 read_state = read_physical_state(ppd->dd);
12881
12882         if (read_state == state) {
12883                 log_state_transition(ppd, state);
12884         } else {
12885                 dd_dev_err(ppd->dd,
12886                            "anticipated phy link state 0x%x, read 0x%x\n",
12887                            state, read_state);
12888         }
12889 }
12890
12891 /*
12892  * wait_physical_linkstate - wait for a physical link state change to occur
12893  * @ppd: port device
12894  * @state: the state to wait for
12895  * @msecs: the number of milliseconds to wait
12896  *
12897  * Wait up to msecs milliseconds for physical link state change to occur.
12898  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12899  */
12900 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12901                                    int msecs)
12902 {
12903         u32 read_state;
12904         unsigned long timeout;
12905
12906         timeout = jiffies + msecs_to_jiffies(msecs);
12907         while (1) {
12908                 read_state = read_physical_state(ppd->dd);
12909                 if (read_state == state)
12910                         break;
12911                 if (time_after(jiffies, timeout)) {
12912                         dd_dev_err(ppd->dd,
12913                                    "timeout waiting for phy link state 0x%x\n",
12914                                    state);
12915                         return -ETIMEDOUT;
12916                 }
12917                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12918         }
12919
12920         log_state_transition(ppd, state);
12921         return 0;
12922 }
12923
12924 /*
12925  * wait_phys_link_offline_substates - wait for any offline substate
12926  * @ppd: port device
12927  * @msecs: the number of milliseconds to wait
12928  *
12929  * Wait up to msecs milliseconds for any offline physical link
12930  * state change to occur.
12931  * Returns the last read physical state on success, otherwise -ETIMEDOUT.
12932  */
12933 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12934                                             int msecs)
12935 {
12936         u32 read_state;
12937         unsigned long timeout;
12938
12939         timeout = jiffies + msecs_to_jiffies(msecs);
12940         while (1) {
12941                 read_state = read_physical_state(ppd->dd);
12942                 if ((read_state & 0xF0) == PLS_OFFLINE)
12943                         break;
12944                 if (time_after(jiffies, timeout)) {
12945                         dd_dev_err(ppd->dd,
12946                                    "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12947                                    read_state, msecs);
12948                         return -ETIMEDOUT;
12949                 }
12950                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12951         }
12952
12953         log_state_transition(ppd, read_state);
12954         return read_state;
12955 }
12956
12957 /*
12958  * wait_phys_link_out_of_offline - wait for any out of offline state
12959  * @ppd: port device
12960  * @msecs: the number of milliseconds to wait
12961  *
12962  * Wait up to msecs milliseconds for any out of offline physical link
12963  * state change to occur.
12964  * Returns the last read physical state on success, otherwise -ETIMEDOUT.
12965  */
12966 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
12967                                          int msecs)
12968 {
12969         u32 read_state;
12970         unsigned long timeout;
12971
12972         timeout = jiffies + msecs_to_jiffies(msecs);
12973         while (1) {
12974                 read_state = read_physical_state(ppd->dd);
12975                 if ((read_state & 0xF0) != PLS_OFFLINE)
12976                         break;
12977                 if (time_after(jiffies, timeout)) {
12978                         dd_dev_err(ppd->dd,
12979                                    "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
12980                                    read_state, msecs);
12981                         return -ETIMEDOUT;
12982                 }
12983                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12984         }
12985
12986         log_state_transition(ppd, read_state);
12987         return read_state;
12988 }
12989
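      /*
       * Helpers for the DisallowPbcStaticRateControl bit in SendCtxtCheckEnable.
       * Note the inversion in hfi1_init_ctxt() below: when the STATIC_RATE_CTRL
       * capability is set for the context type, the disallow bit is cleared
       * (PBC static rate control is permitted), and vice versa.
       */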
12990 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12991 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12992
12993 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12994 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12995
12996 void hfi1_init_ctxt(struct send_context *sc)
12997 {
12998         if (sc) {
12999                 struct hfi1_devdata *dd = sc->dd;
13000                 u64 reg;
13001                 u8 set = (sc->type == SC_USER ?
13002                           HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13003                           HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
13004                 reg = read_kctxt_csr(dd, sc->hw_context,
13005                                      SEND_CTXT_CHECK_ENABLE);
13006                 if (set)
13007                         CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13008                 else
13009                         SET_STATIC_RATE_CONTROL_SMASK(reg);
13010                 write_kctxt_csr(dd, sc->hw_context,
13011                                 SEND_CTXT_CHECK_ENABLE, reg);
13012         }
13013 }
13014
13015 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13016 {
13017         int ret = 0;
13018         u64 reg;
13019
13020         if (dd->icode != ICODE_RTL_SILICON) {
13021                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13022                         dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13023                                     __func__);
13024                 return -EINVAL;
13025         }
13026         reg = read_csr(dd, ASIC_STS_THERM);
13027         temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13028                       ASIC_STS_THERM_CURR_TEMP_MASK);
13029         temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13030                         ASIC_STS_THERM_LO_TEMP_MASK);
13031         temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13032                         ASIC_STS_THERM_HI_TEMP_MASK);
13033         temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13034                           ASIC_STS_THERM_CRIT_TEMP_MASK);
13035         /* triggers is a 3-bit value - 1 bit per trigger. */
13036         temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13037
13038         return ret;
13039 }
13040
13041 /**
13042  * get_int_mask - get 64 bit int mask
13043  * @dd: the devdata
13044  * @i: the csr (relative to CCE_INT_MASK)
13045  *
13046  * Returns the mask with the urgent interrupt mask
13047  * bits cleared for kernel receive contexts.
13048  */
13049 static u64 get_int_mask(struct hfi1_devdata *dd, u32 i)
13050 {
13051         u64 mask = U64_MAX; /* default to no change */
13052
13053         if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) {
13054                 int j = (i - (IS_RCVURGENT_START / 64)) * 64;
13055                 int k = !j ? IS_RCVURGENT_START % 64 : 0;
13056
13057                 if (j)
13058                         j -= IS_RCVURGENT_START % 64;
13059                 /* j = 0..dd->first_dyn_alloc_ctxt - 1, k = 0..63 */
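                      /*
                       * Worked example (hypothetical numbers): if IS_RCVURGENT_START
                       * were 200, then for CSR i = 3 (sources 192..255) j starts at 0
                       * and k at 200 % 64 = 8, so kernel context 0 maps to bit 8,
                       * context 1 to bit 9, and so on; for CSR i = 4, j starts at
                       * 64 - 8 = 56 and k at 0.
                       */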
13060                 for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++)
13061                         /* convert to bit in mask and clear */
13062                         mask &= ~BIT_ULL(k);
13063         }
13064         return mask;
13065 }
13066
13067 /* ========================================================================= */
13068
13069 /*
13070  * Enable/disable chip from delivering interrupts.
13071  */
13072 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
13073 {
13074         int i;
13075
13076         /*
13077          * In HFI, the mask needs to be 1 to allow interrupts.
13078          */
13079         if (enable) {
13080                 /* enable all interrupts but urgent on kernel contexts */
13081                 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13082                         u64 mask = get_int_mask(dd, i);
13083
13084                         write_csr(dd, CCE_INT_MASK + (8 * i), mask);
13085                 }
13086
13087                 init_qsfp_int(dd);
13088         } else {
13089                 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13090                         write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13091         }
13092 }
13093
13094 /*
13095  * Clear all interrupt sources on the chip.
13096  */
13097 static void clear_all_interrupts(struct hfi1_devdata *dd)
13098 {
13099         int i;
13100
13101         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13102                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13103
13104         write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13105         write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13106         write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13107         write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13108         write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13109         write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13110         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13111         for (i = 0; i < chip_send_contexts(dd); i++)
13112                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13113         for (i = 0; i < chip_sdma_engines(dd); i++)
13114                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13115
13116         write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13117         write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13118         write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13119 }
13120
13121 /**
13122  * hfi1_clean_up_interrupts() - Free all IRQ resources
13123  * @dd: valid device data structure
13124  *
13125  * Free the MSI-X and associated PCI resources, if they have been allocated.
13126  */
13127 void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
13128 {
13129         int i;
13130         struct hfi1_msix_entry *me = dd->msix_entries;
13131
13132         /* remove irqs - must happen before disabling/turning off */
13133         for (i = 0; i < dd->num_msix_entries; i++, me++) {
13134                 if (!me->arg) /* => no irq, no affinity */
13135                         continue;
13136                 hfi1_put_irq_affinity(dd, me);
13137                 pci_free_irq(dd->pcidev, i, me->arg);
13138         }
13139
13140         /* clean structures */
13141         kfree(dd->msix_entries);
13142         dd->msix_entries = NULL;
13143         dd->num_msix_entries = 0;
13144
13145         pci_free_irq_vectors(dd->pcidev);
13146 }
13147
13148 /*
13149  * Remap the interrupt source from the general handler to the given MSI-X
13150  * interrupt.
13151  */
13152 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13153 {
13154         u64 reg;
13155         int m, n;
13156
13157         /* clear from the handled mask of the general interrupt */
13158         m = isrc / 64;
13159         n = isrc % 64;
13160         if (likely(m < CCE_NUM_INT_CSRS)) {
13161                 dd->gi_mask[m] &= ~((u64)1 << n);
13162         } else {
13163                 dd_dev_err(dd, "remap interrupt err\n");
13164                 return;
13165         }
13166
13167         /* direct the chip source to the given MSI-X interrupt */
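              /*
               * Each CCE_INT_MAP CSR packs eight 8-bit MSI-X vector numbers,
               * so source isrc lives in byte (isrc % 8) of CSR (isrc / 8);
               * e.g. isrc 73 lands in byte 1 of CSR 9.
               */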
13168         m = isrc / 8;
13169         n = isrc % 8;
13170         reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13171         reg &= ~((u64)0xff << (8 * n));
13172         reg |= ((u64)msix_intr & 0xff) << (8 * n);
13173         write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13174 }
13175
13176 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
13177                                   int engine, int msix_intr)
13178 {
13179         /*
13180          * SDMA engine interrupt sources are grouped by type rather than
13181          * by engine.  Per-engine interrupts are as follows:
13182          *      SDMA
13183          *      SDMAProgress
13184          *      SDMAIdle
13185          */
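              /*
               * Illustration: for engine e the three sources are
               * IS_SDMA_START + e, IS_SDMA_START + TXE_NUM_SDMA_ENGINES + e,
               * and IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + e; all three
               * are pointed at the same MSI-X vector below.
               */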
13186         remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
13187                    msix_intr);
13188         remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
13189                    msix_intr);
13190         remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
13191                    msix_intr);
13192 }
13193
13194 static int request_msix_irqs(struct hfi1_devdata *dd)
13195 {
13196         int first_general, last_general;
13197         int first_sdma, last_sdma;
13198         int first_rx, last_rx;
13199         int i, ret = 0;
13200
13201         /* calculate the ranges we are going to use */
13202         first_general = 0;
13203         last_general = first_general + 1;
13204         first_sdma = last_general;
13205         last_sdma = first_sdma + dd->num_sdma;
13206         first_rx = last_sdma;
13207         last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
13208
13209         /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
13210         dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
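              /*
               * Example layout (hypothetical counts): with 2 SDMA engines and
               * 3 kernel receive contexts, vector 0 is the general interrupt,
               * vectors 1-2 are SDMA, vectors 3-5 are kernel receive contexts,
               * and first_dyn_msix_idx would be 6 for any VNIC contexts.
               */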
13211
13212         /*
13213          * Sanity check - the code expects all SDMA chip source
13214          * interrupts to be in the same CSR, starting at bit 0.  Verify
13215          * that this is true by checking the bit location of the start.
13216          */
13217         BUILD_BUG_ON(IS_SDMA_START % 64);
13218
13219         for (i = 0; i < dd->num_msix_entries; i++) {
13220                 struct hfi1_msix_entry *me = &dd->msix_entries[i];
13221                 const char *err_info;
13222                 irq_handler_t handler;
13223                 irq_handler_t thread = NULL;
13224                 void *arg = NULL;
13225                 int idx;
13226                 struct hfi1_ctxtdata *rcd = NULL;
13227                 struct sdma_engine *sde = NULL;
13228                 char name[MAX_NAME_SIZE];
13229
13230                 /* obtain the arguments to pci_request_irq */
13231                 if (first_general <= i && i < last_general) {
13232                         idx = i - first_general;
13233                         handler = general_interrupt;
13234                         arg = dd;
13235                         snprintf(name, sizeof(name),
13236                                  DRIVER_NAME "_%d", dd->unit);
13237                         err_info = "general";
13238                         me->type = IRQ_GENERAL;
13239                 } else if (first_sdma <= i && i < last_sdma) {
13240                         idx = i - first_sdma;
13241                         sde = &dd->per_sdma[idx];
13242                         handler = sdma_interrupt;
13243                         arg = sde;
13244                         snprintf(name, sizeof(name),
13245                                  DRIVER_NAME "_%d sdma%d", dd->unit, idx);
13246                         err_info = "sdma";
13247                         remap_sdma_interrupts(dd, idx, i);
13248                         me->type = IRQ_SDMA;
13249                 } else if (first_rx <= i && i < last_rx) {
13250                         idx = i - first_rx;
13251                         rcd = hfi1_rcd_get_by_index_safe(dd, idx);
13252                         if (rcd) {
13253                                 /*
13254                                  * Set the interrupt register and mask for this
13255                                  * context's interrupt.
13256                                  */
13257                                 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13258                                 rcd->imask = ((u64)1) <<
13259                                           ((IS_RCVAVAIL_START + idx) % 64);
13260                                 handler = receive_context_interrupt;
13261                                 thread = receive_context_thread;
13262                                 arg = rcd;
13263                                 snprintf(name, sizeof(name),
13264                                          DRIVER_NAME "_%d kctxt%d",
13265                                          dd->unit, idx);
13266                                 err_info = "receive context";
13267                                 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
13268                                 me->type = IRQ_RCVCTXT;
13269                                 rcd->msix_intr = i;
13270                                 hfi1_rcd_put(rcd);
13271                         }
13272                 } else {
13273                         /* not in our expected range - complain, then
13274                          * ignore it
13275                          */
13276                         dd_dev_err(dd,
13277                                    "Unexpected extra MSI-X interrupt %d\n", i);
13278                         continue;
13279                 }
13280                 /* no argument, no interrupt */
13281                 if (!arg)
13282                         continue;
13283                 /* make sure the name is terminated */
13284                 name[sizeof(name) - 1] = 0;
13285                 me->irq = pci_irq_vector(dd->pcidev, i);
13286                 ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
13287                                       name);
13288                 if (ret) {
13289                         dd_dev_err(dd,
13290                                    "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13291                                    err_info, me->irq, idx, ret);
13292                         return ret;
13293                 }
13294                 /*
13295                  * assign arg after pci_request_irq call, so it will be
13296                  * cleaned up
13297                  */
13298                 me->arg = arg;
13299
13300                 ret = hfi1_get_irq_affinity(dd, me);
13301                 if (ret)
13302                         dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
13303         }
13304
13305         return ret;
13306 }
13307
13308 void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13309 {
13310         int i;
13311
13312         for (i = 0; i < dd->vnic.num_ctxt; i++) {
13313                 struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13314                 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13315
13316                 synchronize_irq(me->irq);
13317         }
13318 }
13319
13320 void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13321 {
13322         struct hfi1_devdata *dd = rcd->dd;
13323         struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13324
13325         if (!me->arg) /* => no irq, no affinity */
13326                 return;
13327
13328         hfi1_put_irq_affinity(dd, me);
13329         pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
13330
13331         me->arg = NULL;
13332 }
13333
13334 void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13335 {
13336         struct hfi1_devdata *dd = rcd->dd;
13337         struct hfi1_msix_entry *me;
13338         int idx = rcd->ctxt;
13339         void *arg = rcd;
13340         int ret;
13341
13342         rcd->msix_intr = dd->vnic.msix_idx++;
13343         me = &dd->msix_entries[rcd->msix_intr];
13344
13345         /*
13346          * Set the interrupt register and mask for this
13347          * context's interrupt.
13348          */
13349         rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13350         rcd->imask = ((u64)1) <<
13351                   ((IS_RCVAVAIL_START + idx) % 64);
13352         me->type = IRQ_RCVCTXT;
13353         me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13354         remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13355
13356         ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
13357                               receive_context_interrupt,
13358                               receive_context_thread, arg,
13359                               DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13360         if (ret) {
13361                 dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13362                            me->irq, idx, ret);
13363                 return;
13364         }
13365         /*
13366          * assign arg after pci_request_irq call, so it will be
13367          * cleaned up
13368          */
13369         me->arg = arg;
13370
13371         ret = hfi1_get_irq_affinity(dd, me);
13372         if (ret) {
13373                 dd_dev_err(dd,
13374                            "unable to pin IRQ %d\n", ret);
13375                 pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
13376         }
13377 }
13378
13379 /*
13380  * Set the general handler to accept all interrupts, remap all
13381  * chip interrupts back to MSI-X 0.
13382  */
13383 static void reset_interrupts(struct hfi1_devdata *dd)
13384 {
13385         int i;
13386
13387         /* all interrupts handled by the general handler */
13388         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13389                 dd->gi_mask[i] = ~(u64)0;
13390
13391         /* all chip interrupts map to MSI-X 0 */
13392         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13393                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13394 }
13395
13396 static int set_up_interrupts(struct hfi1_devdata *dd)
13397 {
13398         u32 total;
13399         int ret, request;
13400
13401         /*
13402          * Interrupt count:
13403          *      1 general, "slow path" interrupt (includes the SDMA engines'
13404          *              slow source, SDMACleanupDone)
13405          *      N interrupts - one per used SDMA engine
13406          *      M interrupts - one per kernel receive context
13407          *      V interrupts - one for each VNIC context
13408          */
13409         total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
13410
13411         /* ask for MSI-X interrupts */
13412         request = request_msix(dd, total);
13413         if (request < 0) {
13414                 ret = request;
13415                 goto fail;
13416         } else {
13417                 dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13418                                            GFP_KERNEL);
13419                 if (!dd->msix_entries) {
13420                         ret = -ENOMEM;
13421                         goto fail;
13422                 }
13423                 /* using MSI-X */
13424                 dd->num_msix_entries = total;
13425                 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13426         }
13427
13428         /* mask all interrupts */
13429         set_intr_state(dd, 0);
13430         /* clear all pending interrupts */
13431         clear_all_interrupts(dd);
13432
13433         /* reset general handler mask, chip MSI-X mappings */
13434         reset_interrupts(dd);
13435
13436         ret = request_msix_irqs(dd);
13437         if (ret)
13438                 goto fail;
13439
13440         return 0;
13441
13442 fail:
13443         hfi1_clean_up_interrupts(dd);
13444         return ret;
13445 }
13446
13447 /*
13448  * Set up context values in dd.  Sets:
13449  *
13450  *      num_rcv_contexts - number of contexts being used
13451  *      n_krcv_queues - number of kernel contexts
13452  *      first_dyn_alloc_ctxt - first dynamically allocated context
13453  *                             in array of contexts
13454  *      freectxts  - number of free user contexts
13455  *      num_send_contexts - number of PIO send contexts being used
13456  *      num_vnic_contexts - number of contexts reserved for VNIC
13457  */
13458 static int set_up_context_variables(struct hfi1_devdata *dd)
13459 {
13460         unsigned long num_kernel_contexts;
13461         u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13462         int total_contexts;
13463         int ret;
13464         unsigned ngroups;
13465         int rmt_count;
13466         int user_rmt_reduced;
13467         u32 n_usr_ctxts;
13468         u32 send_contexts = chip_send_contexts(dd);
13469         u32 rcv_contexts = chip_rcv_contexts(dd);
13470
13471         /*
13472          * Kernel receive contexts:
13473          * - Context 0 - control context (VL15/multicast/error)
13474          * - Context 1 - first kernel context
13475          * - Context 2 - second kernel context
13476          * ...
13477          */
13478         if (n_krcvqs)
13479                 /*
13480                  * n_krcvqs is the sum of module parameter kernel receive
13481                  * contexts, krcvqs[].  It does not include the control
13482                  * context, so add that.
13483                  */
13484                 num_kernel_contexts = n_krcvqs + 1;
13485         else
13486                 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13487         /*
13488          * Every kernel receive context needs an ACK send context.
13489          * One send context is allocated for each VL{0-7} and VL15.
13490          */
13491         if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13492                 dd_dev_err(dd,
13493                            "Reducing # kernel rcv contexts to: %d, from %lu\n",
13494                            send_contexts - num_vls - 1,
13495                            num_kernel_contexts);
13496                 num_kernel_contexts = send_contexts - num_vls - 1;
13497         }
13498
13499         /* Accommodate VNIC contexts if possible */
13500         if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
13501                 dd_dev_err(dd, "No receive contexts available for VNIC\n");
13502                 num_vnic_contexts = 0;
13503         }
13504         total_contexts = num_kernel_contexts + num_vnic_contexts;
13505
13506         /*
13507          * User contexts:
13508          *      - default to 1 user context per real (non-HT) CPU core if
13509          *        num_user_contexts is negative
13510          */
13511         if (num_user_contexts < 0)
13512                 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13513         else
13514                 n_usr_ctxts = num_user_contexts;
13515         /*
13516          * Adjust the counts given a global max.
13517          */
13518         if (total_contexts + n_usr_ctxts > rcv_contexts) {
13519                 dd_dev_err(dd,
13520                            "Reducing # user receive contexts to: %d, from %u\n",
13521                            rcv_contexts - total_contexts,
13522                            n_usr_ctxts);
13523                 /* recalculate */
13524                 n_usr_ctxts = rcv_contexts - total_contexts;
13525         }
13526
13527         /*
13528          * The RMT entries are currently allocated as shown below:
13529          * 1. QOS (0 to 128 entries);
13530          * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
13531          * 3. VNIC (num_vnic_contexts).
13532          * Note that PSM FECN oversubscribes num_vnic_contexts
13533          * RMT entries because both VNIC and PSM could allocate any receive
13534          * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
13535          * and PSM FECN must reserve an RMT entry for each possible PSM receive
13536          * context.
13537          */
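              /*
               * Illustration (hypothetical counts): 16 QOS entries and
               * 4 VNIC contexts give rmt_count = 16 + (4 * 2) = 24, so the
               * user context count is capped at NUM_MAP_ENTRIES - 24.
               */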
13538         rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
13539         if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13540                 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
13541                 dd_dev_err(dd,
13542                            "RMT size is reducing the number of user receive contexts from %u to %d\n",
13543                            n_usr_ctxts,
13544                            user_rmt_reduced);
13545                 /* recalculate */
13546                 n_usr_ctxts = user_rmt_reduced;
13547         }
13548
13549         total_contexts += n_usr_ctxts;
13550
13551         /* the first N are kernel contexts, the rest are user/vnic contexts */
13552         dd->num_rcv_contexts = total_contexts;
13553         dd->n_krcv_queues = num_kernel_contexts;
13554         dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13555         dd->num_vnic_contexts = num_vnic_contexts;
13556         dd->num_user_contexts = n_usr_ctxts;
13557         dd->freectxts = n_usr_ctxts;
13558         dd_dev_info(dd,
13559                     "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13560                     rcv_contexts,
13561                     (int)dd->num_rcv_contexts,
13562                     (int)dd->n_krcv_queues,
13563                     dd->num_vnic_contexts,
13564                     dd->num_user_contexts);
13565
13566         /*
13567          * Receive array allocation:
13568          *   All RcvArray entries are divided into groups of 8. This
13569          *   is required by the hardware and will speed up writes to
13570          *   consecutive entries by using write-combining of the entire
13571          *   cacheline.
13572          *
13573          *   The number of groups is evenly divided among all contexts;
13574          *   any leftover groups are given to the first N user
13575          *   contexts.
13576          */
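              /*
               * For example (hypothetical counts): 40 groups split across
               * 16 contexts gives ngroups = 2 per context, with
               * nctxt_extra = 8 groups left over for the first user contexts.
               */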
13577         dd->rcv_entries.group_size = RCV_INCREMENT;
13578         ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13579         dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13580         dd->rcv_entries.nctxt_extra = ngroups -
13581                 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13582         dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13583                     dd->rcv_entries.ngroups,
13584                     dd->rcv_entries.nctxt_extra);
13585         if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13586             MAX_EAGER_ENTRIES * 2) {
13587                 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13588                         dd->rcv_entries.group_size;
13589                 dd_dev_info(dd,
13590                             "RcvArray group count too high, change to %u\n",
13591                             dd->rcv_entries.ngroups);
13592                 dd->rcv_entries.nctxt_extra = 0;
13593         }
13594         /*
13595          * PIO send contexts
13596          */
13597         ret = init_sc_pools_and_sizes(dd);
13598         if (ret >= 0) { /* success */
13599                 dd->num_send_contexts = ret;
13600                 dd_dev_info(
13601                         dd,
13602                         "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13603                         send_contexts,
13604                         dd->num_send_contexts,
13605                         dd->sc_sizes[SC_KERNEL].count,
13606                         dd->sc_sizes[SC_ACK].count,
13607                         dd->sc_sizes[SC_USER].count,
13608                         dd->sc_sizes[SC_VL15].count);
13609                 ret = 0;        /* success */
13610         }
13611
13612         return ret;
13613 }
13614
13615 /*
13616  * Set the device/port partition key table. The MAD code
13617  * will ensure that, at least, the partial management
13618  * partition key is present in the table.
13619  */
13620 static void set_partition_keys(struct hfi1_pportdata *ppd)
13621 {
13622         struct hfi1_devdata *dd = ppd->dd;
13623         u64 reg = 0;
13624         int i;
13625
13626         dd_dev_info(dd, "Setting partition keys\n");
13627         for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13628                 reg |= (ppd->pkeys[i] &
13629                         RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13630                         ((i % 4) *
13631                          RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13632                 /* Each register holds 4 PKey values. */
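                      /*
                       * e.g. i = 7 flushes pkeys 4..7 to RCV_PARTITION_KEY + 8,
                       * since ((7 - 3) * 2) selects the second 64-bit register.
                       */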
13633                 if ((i % 4) == 3) {
13634                         write_csr(dd, RCV_PARTITION_KEY +
13635                                   ((i - 3) * 2), reg);
13636                         reg = 0;
13637                 }
13638         }
13639
13640         /* Always enable HW pkeys check when pkeys table is set */
13641         add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13642 }
13643
13644 /*
13645  * These CSRs and memories are uninitialized on reset and must be
13646  * written before reading to set the ECC/parity bits.
13647  *
13648  * NOTE: All user context CSRs that are not mmapped write-only
13649  * (e.g. the TID flows) must be initialized even if the driver never
13650  * reads them.
13651  */
13652 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13653 {
13654         int i, j;
13655
13656         /* CceIntMap */
13657         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13658                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13659
13660         /* SendCtxtCreditReturnAddr */
13661         for (i = 0; i < chip_send_contexts(dd); i++)
13662                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13663
13664         /* PIO Send buffers */
13665         /* SDMA Send buffers */
13666         /*
13667          * These are not normally read, and (presently) have no method
13668          * to be read, so are not pre-initialized
13669          */
13670
13671         /* RcvHdrAddr */
13672         /* RcvHdrTailAddr */
13673         /* RcvTidFlowTable */
13674         for (i = 0; i < chip_rcv_contexts(dd); i++) {
13675                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13676                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13677                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13678                         write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13679         }
13680
13681         /* RcvArray */
13682         for (i = 0; i < chip_rcv_array_count(dd); i++)
13683                 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13684
13685         /* RcvQPMapTable */
13686         for (i = 0; i < 32; i++)
13687                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13688 }
13689
13690 /*
13691  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13692  */
13693 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13694                              u64 ctrl_bits)
13695 {
13696         unsigned long timeout;
13697         u64 reg;
13698
13699         /* is the condition present? */
13700         reg = read_csr(dd, CCE_STATUS);
13701         if ((reg & status_bits) == 0)
13702                 return;
13703
13704         /* clear the condition */
13705         write_csr(dd, CCE_CTRL, ctrl_bits);
13706
13707         /* wait for the condition to clear */
13708         timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13709         while (1) {
13710                 reg = read_csr(dd, CCE_STATUS);
13711                 if ((reg & status_bits) == 0)
13712                         return;
13713                 if (time_after(jiffies, timeout)) {
13714                         dd_dev_err(dd,
13715                                    "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13716                                    status_bits, reg & status_bits);
13717                         return;
13718                 }
13719                 udelay(1);
13720         }
13721 }
13722
13723 /* set CCE CSRs to chip reset defaults */
13724 static void reset_cce_csrs(struct hfi1_devdata *dd)
13725 {
13726         int i;
13727
13728         /* CCE_REVISION read-only */
13729         /* CCE_REVISION2 read-only */
13730         /* CCE_CTRL - bits clear automatically */
13731         /* CCE_STATUS read-only, use CceCtrl to clear */
13732         clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13733         clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13734         clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13735         for (i = 0; i < CCE_NUM_SCRATCH; i++)
13736                 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13737         /* CCE_ERR_STATUS read-only */
13738         write_csr(dd, CCE_ERR_MASK, 0);
13739         write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13740         /* CCE_ERR_FORCE leave alone */
13741         for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13742                 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13743         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13744         /* CCE_PCIE_CTRL leave alone */
13745         for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13746                 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13747                 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13748                           CCE_MSIX_TABLE_UPPER_RESETCSR);
13749         }
13750         for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13751                 /* CCE_MSIX_PBA read-only */
13752                 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13753                 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13754         }
13755         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13756                 write_csr(dd, CCE_INT_MAP, 0);
13757         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13758                 /* CCE_INT_STATUS read-only */
13759                 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13760                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13761                 /* CCE_INT_FORCE leave alone */
13762                 /* CCE_INT_BLOCKED read-only */
13763         }
13764         for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13765                 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13766 }
13767
13768 /* set MISC CSRs to chip reset defaults */
13769 static void reset_misc_csrs(struct hfi1_devdata *dd)
13770 {
13771         int i;
13772
13773         for (i = 0; i < 32; i++) {
13774                 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13775                 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13776                 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13777         }
13778         /*
13779          * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13780          * only be written 128-byte chunks
13781          * only be written in 128-byte chunks
13782         /* init RSA engine to clear lingering errors */
13783         write_csr(dd, MISC_CFG_RSA_CMD, 1);
13784         write_csr(dd, MISC_CFG_RSA_MU, 0);
13785         write_csr(dd, MISC_CFG_FW_CTRL, 0);
13786         /* MISC_STS_8051_DIGEST read-only */
13787         /* MISC_STS_SBM_DIGEST read-only */
13788         /* MISC_STS_PCIE_DIGEST read-only */
13789         /* MISC_STS_FAB_DIGEST read-only */
13790         /* MISC_ERR_STATUS read-only */
13791         write_csr(dd, MISC_ERR_MASK, 0);
13792         write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13793         /* MISC_ERR_FORCE leave alone */
13794 }
13795
13796 /* set TXE CSRs to chip reset defaults */
13797 static void reset_txe_csrs(struct hfi1_devdata *dd)
13798 {
13799         int i;
13800
13801         /*
13802          * TXE Kernel CSRs
13803          */
13804         write_csr(dd, SEND_CTRL, 0);
13805         __cm_reset(dd, 0);      /* reset CM internal state */
13806         /* SEND_CONTEXTS read-only */
13807         /* SEND_DMA_ENGINES read-only */
13808         /* SEND_PIO_MEM_SIZE read-only */
13809         /* SEND_DMA_MEM_SIZE read-only */
13810         write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13811         pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
13812         /* SEND_PIO_ERR_STATUS read-only */
13813         write_csr(dd, SEND_PIO_ERR_MASK, 0);
13814         write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13815         /* SEND_PIO_ERR_FORCE leave alone */
13816         /* SEND_DMA_ERR_STATUS read-only */
13817         write_csr(dd, SEND_DMA_ERR_MASK, 0);
13818         write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13819         /* SEND_DMA_ERR_FORCE leave alone */
13820         /* SEND_EGRESS_ERR_STATUS read-only */
13821         write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13822         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13823         /* SEND_EGRESS_ERR_FORCE leave alone */
13824         write_csr(dd, SEND_BTH_QP, 0);
13825         write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13826         write_csr(dd, SEND_SC2VLT0, 0);
13827         write_csr(dd, SEND_SC2VLT1, 0);
13828         write_csr(dd, SEND_SC2VLT2, 0);
13829         write_csr(dd, SEND_SC2VLT3, 0);
13830         write_csr(dd, SEND_LEN_CHECK0, 0);
13831         write_csr(dd, SEND_LEN_CHECK1, 0);
13832         /* SEND_ERR_STATUS read-only */
13833         write_csr(dd, SEND_ERR_MASK, 0);
13834         write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13835         /* SEND_ERR_FORCE read-only */
13836         for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13837                 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13838         for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13839                 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13840         for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13841                 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13842         for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13843                 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13844         for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13845                 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13846         write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13847         write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13848         /* SEND_CM_CREDIT_USED_STATUS read-only */
13849         write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13850         write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13851         write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13852         write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13853         write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13854         for (i = 0; i < TXE_NUM_DATA_VL; i++)
13855                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13856         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13857         /* SEND_CM_CREDIT_USED_VL read-only */
13858         /* SEND_CM_CREDIT_USED_VL15 read-only */
13859         /* SEND_EGRESS_CTXT_STATUS read-only */
13860         /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13861         write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13862         /* SEND_EGRESS_ERR_INFO read-only */
13863         /* SEND_EGRESS_ERR_SOURCE read-only */
13864
13865         /*
13866          * TXE Per-Context CSRs
13867          */
13868         for (i = 0; i < chip_send_contexts(dd); i++) {
13869                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13870                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13871                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13872                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13873                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13874                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13875                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13876                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13877                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13878                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13879                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13880                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13881         }
13882
13883         /*
13884          * TXE Per-SDMA CSRs
13885          */
13886         for (i = 0; i < chip_sdma_engines(dd); i++) {
13887                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13888                 /* SEND_DMA_STATUS read-only */
13889                 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13890                 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13891                 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13892                 /* SEND_DMA_HEAD read-only */
13893                 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13894                 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13895                 /* SEND_DMA_IDLE_CNT read-only */
13896                 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13897                 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13898                 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13899                 /* SEND_DMA_ENG_ERR_STATUS read-only */
13900                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13901                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13902                 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13903                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13904                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13905                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13906                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13907                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13908                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13909                 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13910         }
13911 }
13912
13913 /*
13914  * Expect on entry:
13915  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13916  */
13917 static void init_rbufs(struct hfi1_devdata *dd)
13918 {
13919         u64 reg;
13920         int count;
13921
13922         /*
13923          * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13924          * clear.
13925          */
13926         count = 0;
13927         while (1) {
13928                 reg = read_csr(dd, RCV_STATUS);
13929                 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13930                             | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13931                         break;
13932                 /*
13933                  * Give up after 1ms - maximum wait time.
13934                  *
13935                  * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13936                  * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13937                  *      136 KB / (66% * 250MB/s) = 844us
13938                  */
13939                 if (count++ > 500) {
13940                         dd_dev_err(dd,
13941                                    "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13942                                    __func__, reg);
13943                         break;
13944                 }
13945                 udelay(2); /* do not busy-wait the CSR */
13946         }
13947
13948         /* start the init - expect RcvCtrl to be 0 */
13949         write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13950
13951         /*
13952          * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13953          * period after the write before RcvStatus.RxRbufInitDone is valid.
13954          * The delay in the first run through the loop below is sufficient and
13955          * required before the first read of RcvStatus.RxRbufInitDone.
13956          */
13957         read_csr(dd, RCV_CTRL);
13958
13959         /* wait for the init to finish */
13960         count = 0;
13961         while (1) {
13962                 /* delay is required first time through - see above */
13963                 udelay(2); /* do not busy-wait the CSR */
13964                 reg = read_csr(dd, RCV_STATUS);
13965                 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13966                         break;
13967
13968                 /* give up after 100us - slowest possible at 33MHz is 73us */
13969                 if (count++ > 50) {
13970                         dd_dev_err(dd,
13971                                    "%s: RcvStatus.RxRbufInit not set, continuing\n",
13972                                    __func__);
13973                         break;
13974                 }
13975         }
13976 }
13977
13978 /* set RXE CSRs to chip reset defaults */
13979 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13980 {
13981         int i, j;
13982
13983         /*
13984          * RXE Kernel CSRs
13985          */
13986         write_csr(dd, RCV_CTRL, 0);
13987         init_rbufs(dd);
13988         /* RCV_STATUS read-only */
13989         /* RCV_CONTEXTS read-only */
13990         /* RCV_ARRAY_CNT read-only */
13991         /* RCV_BUF_SIZE read-only */
13992         write_csr(dd, RCV_BTH_QP, 0);
13993         write_csr(dd, RCV_MULTICAST, 0);
13994         write_csr(dd, RCV_BYPASS, 0);
13995         write_csr(dd, RCV_VL15, 0);
13996         /* this is a clear-down */
13997         write_csr(dd, RCV_ERR_INFO,
13998                   RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13999         /* RCV_ERR_STATUS read-only */
14000         write_csr(dd, RCV_ERR_MASK, 0);
14001         write_csr(dd, RCV_ERR_CLEAR, ~0ull);
14002         /* RCV_ERR_FORCE leave alone */
14003         for (i = 0; i < 32; i++)
14004                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
14005         for (i = 0; i < 4; i++)
14006                 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
14007         for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
14008                 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
14009         for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
14010                 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
14011         for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
14012                 clear_rsm_rule(dd, i);
14013         for (i = 0; i < 32; i++)
14014                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
14015
14016         /*
14017          * RXE Kernel and User Per-Context CSRs
14018          */
14019         for (i = 0; i < chip_rcv_contexts(dd); i++) {
14020                 /* kernel */
14021                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
14022                 /* RCV_CTXT_STATUS read-only */
14023                 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
14024                 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
14025                 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
14026                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
14027                 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
14028                 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
14029                 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
14030                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
14031                 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
14032                 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
14033
14034                 /* user */
14035                 /* RCV_HDR_TAIL read-only */
14036                 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
14037                 /* RCV_EGR_INDEX_TAIL read-only */
14038                 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
14039                 /* RCV_EGR_OFFSET_TAIL read-only */
14040                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
14041                         write_uctxt_csr(dd, i,
14042                                         RCV_TID_FLOW_TABLE + (8 * j), 0);
14043                 }
14044         }
14045 }
14046
14047 /*
14048  * Set sc2vl tables.
14049  *
14050  * They power on to zeros, so to avoid send context errors
14051  * they need to be set:
14052  *
14053  * SC 0-7 -> VL 0-7 (respectively)
14054  * SC 15  -> VL 15
14055  * otherwise
14056  *        -> VL 0
14057  */
14058 static void init_sc2vl_tables(struct hfi1_devdata *dd)
14059 {
14060         int i;
14061         /* init per architecture spec, constrained by hardware capability */
14062
14063         /* HFI maps sent packets */
14064         write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
14065                 0,
14066                 0, 0, 1, 1,
14067                 2, 2, 3, 3,
14068                 4, 4, 5, 5,
14069                 6, 6, 7, 7));
14070         write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
14071                 1,
14072                 8, 0, 9, 0,
14073                 10, 0, 11, 0,
14074                 12, 0, 13, 0,
14075                 14, 0, 15, 15));
14076         write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
14077                 2,
14078                 16, 0, 17, 0,
14079                 18, 0, 19, 0,
14080                 20, 0, 21, 0,
14081                 22, 0, 23, 0));
14082         write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
14083                 3,
14084                 24, 0, 25, 0,
14085                 26, 0, 27, 0,
14086                 28, 0, 29, 0,
14087                 30, 0, 31, 0));
14088
14089         /* DC maps received packets */
14090         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
14091                 15_0,
14092                 0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
14093                 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
14094         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
14095                 31_16,
14096                 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
14097                 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
14098
14099         /* initialize the cached sc2vl values consistently with h/w */
14100         for (i = 0; i < 32; i++) {
14101                 if (i < 8 || i == 15)
14102                         *((u8 *)(dd->sc2vl) + i) = (u8)i;
14103                 else
14104                         *((u8 *)(dd->sc2vl) + i) = 0;
14105         }
14106 }
14107
14108 /*
14109  * Read chip sizes and then reset parts to sane, disabled values.  We cannot
14110  * depend on the chip going through a power-on reset - a driver may be loaded
14111  * and unloaded many times.
14112  *
14113  * Do not write any CSR values to the chip in this routine - there may be
14114  * a reset following the (possible) FLR in this routine.
14115  *
14116  */
14117 static int init_chip(struct hfi1_devdata *dd)
14118 {
14119         int i;
14120         int ret = 0;
14121
14122         /*
14123          * Put the HFI CSRs in a known state.
14124          * Combine this with a DC reset.
14125          *
14126          * Stop the device from doing anything while we do a
14127          * reset.  We know there are no other active users of
14128          * the device since we are now in charge.  Turn off
14129          * the device since we are now in charge.  Turn off
14130          * all outbound and inbound traffic and make sure
14131          */
14132
14133         /* disable send contexts and SDMA engines */
14134         write_csr(dd, SEND_CTRL, 0);
14135         for (i = 0; i < chip_send_contexts(dd); i++)
14136                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
14137         for (i = 0; i < chip_sdma_engines(dd); i++)
14138                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
14139         /* disable port (turn off RXE inbound traffic) and contexts */
14140         write_csr(dd, RCV_CTRL, 0);
14141         for (i = 0; i < chip_rcv_contexts(dd); i++)
14142                 write_csr(dd, RCV_CTXT_CTRL, 0);
14143         /* mask all interrupt sources */
14144         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
14145                 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
14146
14147         /*
14148          * DC Reset: do a full DC reset before the register clear.
14149          * A recommended length of time to hold is one CSR read,
14150          * so reread the CceDcCtrl.  Then, hold the DC in reset
14151          * across the clear.
14152          */
14153         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
14154         (void)read_csr(dd, CCE_DC_CTRL);
14155
14156         if (use_flr) {
14157                 /*
14158                  * A FLR will reset the SPC core and part of the PCIe.
14159                  * The parts that need to be restored have already been
14160                  * saved.
14161                  */
14162                 dd_dev_info(dd, "Resetting CSRs with FLR\n");
14163
14164                 /* do the FLR, the DC reset will remain */
14165                 pcie_flr(dd->pcidev);
14166
14167                 /* restore command and BARs */
14168                 ret = restore_pci_variables(dd);
14169                 if (ret) {
14170                         dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14171                                    __func__);
14172                         return ret;
14173                 }
14174
14175                 if (is_ax(dd)) {
14176                         dd_dev_info(dd, "Resetting CSRs with FLR\n");
14177                         pcie_flr(dd->pcidev);
14178                         ret = restore_pci_variables(dd);
14179                         if (ret) {
14180                                 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14181                                            __func__);
14182                                 return ret;
14183                         }
14184                 }
14185         } else {
14186                 dd_dev_info(dd, "Resetting CSRs with writes\n");
14187                 reset_cce_csrs(dd);
14188                 reset_txe_csrs(dd);
14189                 reset_rxe_csrs(dd);
14190                 reset_misc_csrs(dd);
14191         }
14192         /* clear the DC reset */
14193         write_csr(dd, CCE_DC_CTRL, 0);
14194
14195         /* Set the LED off */
14196         setextled(dd, 0);
14197
14198         /*
14199          * Clear the QSFP reset.
14200          * An FLR enforces a 0 on all out pins. The driver does not touch
14201          * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
14202          * anything plugged in held constantly in reset, if it pays attention
14203          * to RESET_N.
14204          * Prime examples of this are optical cables. Set all pins high.
14205          * I2CCLK and I2CDAT will change per direction, and INT_N and
14206          * MODPRS_N are input only and their value is ignored.
14207          */
14208         write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14209         write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
14210         init_chip_resources(dd);
14211         return ret;
14212 }
14213
14214 static void init_early_variables(struct hfi1_devdata *dd)
14215 {
14216         int i;
14217
14218         /* assign link credit variables */
14219         dd->vau = CM_VAU;
14220         dd->link_credits = CM_GLOBAL_CREDITS;
14221         if (is_ax(dd))
14222                 dd->link_credits--;
14223         dd->vcu = cu_to_vcu(hfi1_cu);
14224         /* enough room for 8 MAD packets plus header - 17K */
14225         dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14226         if (dd->vl15_init > dd->link_credits)
14227                 dd->vl15_init = dd->link_credits;
14228
14229         write_uninitialized_csrs_and_memories(dd);
14230
14231         if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14232                 for (i = 0; i < dd->num_pports; i++) {
14233                         struct hfi1_pportdata *ppd = &dd->pport[i];
14234
14235                         set_partition_keys(ppd);
14236                 }
14237         init_sc2vl_tables(dd);
14238 }
14239
14240 static void init_kdeth_qp(struct hfi1_devdata *dd)
14241 {
14242         /* user changed the KDETH_QP */
14243         if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14244                 /* out of range or illegal value */
14245                 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14246                 kdeth_qp = 0;
14247         }
14248         if (kdeth_qp == 0)      /* not set, or failed range check */
14249                 kdeth_qp = DEFAULT_KDETH_QP;
14250
14251         write_csr(dd, SEND_BTH_QP,
14252                   (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14253                   SEND_BTH_QP_KDETH_QP_SHIFT);
14254
14255         write_csr(dd, RCV_BTH_QP,
14256                   (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14257                   RCV_BTH_QP_KDETH_QP_SHIFT);
14258 }
14259
14260 /**
14261  * init_qpmap_table
14262  * @dd: device data
14263  * @first_ctxt: first context
14264  * @last_ctxt: last context
14265  *
14266  * This routine sets the qpn mapping table that
14267  * is indexed by qpn[8:1].
14268  *
14269  * The routine will round robin the 256 settings
14270  * from first_ctxt to last_ctxt.
14271  *
14272  * The first/last looks ahead to having specialized
14273  * receive contexts for mgmt and bypass.  Normal
14274  * verbs traffic will be assumed to be on a range
14275  * of receive contexts.
14276  */
14277 static void init_qpmap_table(struct hfi1_devdata *dd,
14278                              u32 first_ctxt,
14279                              u32 last_ctxt)
14280 {
14281         u64 reg = 0;
14282         u64 regno = RCV_QP_MAP_TABLE;
14283         int i;
14284         u64 ctxt = first_ctxt;
14285
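              /*
               * The 256 byte-wide entries are packed 8 per 64-bit CSR; e.g.
               * with first_ctxt = 1 and last_ctxt = 3 (hypothetical values),
               * the table cycles 1, 2, 3, 1, 2, 3, ... and every eighth
               * entry flushes one CSR at regno.
               */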
14286         for (i = 0; i < 256; i++) {
14287                 reg |= ctxt << (8 * (i % 8));
14288                 ctxt++;
14289                 if (ctxt > last_ctxt)
14290                         ctxt = first_ctxt;
14291                 if (i % 8 == 7) {
14292                         write_csr(dd, regno, reg);
14293                         reg = 0;
14294                         regno += 8;
14295                 }
14296         }
14297
14298         add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14299                         | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14300 }
14301
14302 struct rsm_map_table {
14303         u64 map[NUM_MAP_REGS];
14304         unsigned int used;
14305 };
14306
14307 struct rsm_rule_data {
14308         u8 offset;
14309         u8 pkt_type;
14310         u32 field1_off;
14311         u32 field2_off;
14312         u32 index1_off;
14313         u32 index1_width;
14314         u32 index2_off;
14315         u32 index2_width;
14316         u32 mask1;
14317         u32 value1;
14318         u32 mask2;
14319         u32 value2;
14320 };
14321
14322 /*
14323  * Return an initialized RMT map table for users to fill in.  OK if it
14324  * returns NULL, indicating no table.
14325  */
14326 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14327 {
14328         struct rsm_map_table *rmt;
14329         u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
14330
14331         rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14332         if (rmt) {
14333                 memset(rmt->map, rxcontext, sizeof(rmt->map));
14334                 rmt->used = 0;
14335         }
14336
14337         return rmt;
14338 }
14339
14340 /*
14341  * Write the final RMT map table to the chip (the caller frees the
14342  * table).  OK if table is NULL.
14343  */
14344 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14345                                    struct rsm_map_table *rmt)
14346 {
14347         int i;
14348
14349         if (rmt) {
14350                 /* write table to chip */
14351                 for (i = 0; i < NUM_MAP_REGS; i++)
14352                         write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14353
14354                 /* enable RSM */
14355                 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14356         }
14357 }
14358
14359 /*
14360  * Add a receive side mapping rule.
14361  */
14362 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14363                          struct rsm_rule_data *rrd)
14364 {
14365         write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14366                   (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14367                   1ull << rule_index | /* enable bit */
14368                   (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14369         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14370                   (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14371                   (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14372                   (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14373                   (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14374                   (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14375                   (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14376         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14377                   (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14378                   (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14379                   (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14380                   (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14381 }
14382
14383 /*
14384  * Clear a receive side mapping rule.
14385  */
14386 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14387 {
14388         write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14389         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14390         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14391 }
14392
14393 /* return the number of RSM map table entries that will be used for QOS */
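/*
 * Worked example (krcvqs values assumed): with num_vls = 4 and
 * krcvqs[] = {2, 2, 2, 2}, max_by_vl = 2 so m = 1, and n = ilog2(4) = 2;
 * m + n = 3 <= 7, so 1 << 3 = 8 map table entries are used.
 */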
14394 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14395                            unsigned int *np)
14396 {
14397         int i;
14398         unsigned int m, n;
14399         u8 max_by_vl = 0;
14400
14401         /* is QOS active at all? */
14402         if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14403             num_vls == 1 ||
14404             krcvqsset <= 1)
14405                 goto no_qos;
14406
14407         /* determine bits for qpn */
14408         for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14409                 if (krcvqs[i] > max_by_vl)
14410                         max_by_vl = krcvqs[i];
14411         if (max_by_vl > 32)
14412                 goto no_qos;
14413         m = ilog2(__roundup_pow_of_two(max_by_vl));
14414
14415         /* determine bits for vl */
14416         n = ilog2(__roundup_pow_of_two(num_vls));
14417
14418         /* reject if too much is used */
14419         if ((m + n) > 7)
14420                 goto no_qos;
14421
14422         if (mp)
14423                 *mp = m;
14424         if (np)
14425                 *np = n;
14426
14427         return 1 << (m + n);
14428
14429 no_qos:
14430         if (mp)
14431                 *mp = 0;
14432         if (np)
14433                 *np = 0;
14434         return 0;
14435 }
14436
14437 /**
14438  * init_qos - init RX qos
14439  * @dd - device data
14440  * @rmt - RSM map table
14441  *
14442  * This routine initializes Rule 0 and the RSM map table to implement
14443  * quality of service (qos).
14444  *
14445  * If all of the limit tests succeed, qos is applied based on the array
14446  * interpretation of krcvqs where entry 0 is VL0.
14447  *
14448  * The number of vl bits (n) and the number of qpn bits (m) are computed to
14449  * feed both the RSM map table and the single rule.
14450  */
14451 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14452 {
14453         struct rsm_rule_data rrd;
14454         unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14455         unsigned int rmt_entries;
14456         u64 reg;
14457
14458         if (!rmt)
14459                 goto bail;
14460         rmt_entries = qos_rmt_entries(dd, &m, &n);
14461         if (rmt_entries == 0)
14462                 goto bail;
14463         qpns_per_vl = 1 << m;
14464
14465         /* enough room in the map table? */
14466         rmt_entries = 1 << (m + n);
14467         if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14468                 goto bail;
14469
14470         /* add qos entries to the RSM map table */
14471         for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14472                 unsigned tctxt;
14473
14474                 for (qpn = 0, tctxt = ctxt;
14475                      krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14476                         unsigned idx, regoff, regidx;
14477
14478                         /* generate the index the hardware will produce */
14479                         idx = rmt->used + ((qpn << n) ^ i);
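                        /*
                         * e.g. (illustrative): with rmt->used = 0, n = 1
                         * (2 VLs) and qpns_per_vl = 4, VL0 maps to entries
                         * 0, 2, 4, 6 and VL1 to entries 1, 3, 5, 7.
                         */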
14480                         regoff = (idx % 8) * 8;
14481                         regidx = idx / 8;
14482                         /* replace default with context number */
14483                         reg = rmt->map[regidx];
14484                         reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14485                                 << regoff);
14486                         reg |= (u64)(tctxt++) << regoff;
14487                         rmt->map[regidx] = reg;
14488                         if (tctxt == ctxt + krcvqs[i])
14489                                 tctxt = ctxt;
14490                 }
14491                 ctxt += krcvqs[i];
14492         }
14493
14494         rrd.offset = rmt->used;
14495         rrd.pkt_type = 2;
14496         rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14497         rrd.field2_off = LRH_SC_MATCH_OFFSET;
14498         rrd.index1_off = LRH_SC_SELECT_OFFSET;
14499         rrd.index1_width = n;
14500         rrd.index2_off = QPN_SELECT_OFFSET;
14501         rrd.index2_width = m + n;
14502         rrd.mask1 = LRH_BTH_MASK;
14503         rrd.value1 = LRH_BTH_VALUE;
14504         rrd.mask2 = LRH_SC_MASK;
14505         rrd.value2 = LRH_SC_VALUE;
14506
14507         /* add rule 0 */
14508         add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14509
14510         /* mark RSM map entries as used */
14511         rmt->used += rmt_entries;
14512         /* map everything else to the mcast/err/vl15 context */
14513         init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14514         dd->qos_shift = n + 1;
14515         return;
14516 bail:
14517         dd->qos_shift = 1;
14518         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14519 }
14520
14521 static void init_user_fecn_handling(struct hfi1_devdata *dd,
14522                                     struct rsm_map_table *rmt)
14523 {
14524         struct rsm_rule_data rrd;
14525         u64 reg;
14526         int i, idx, regoff, regidx;
14527         u8 offset;
14528         u32 total_cnt;
14529
14530         /* there needs to be enough room in the map table */
14531         total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
14532         if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
14533                 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14534                 return;
14535         }
14536
14537         /*
14538          * RSM will extract the destination context as an index into the
14539          * map table.  The destination contexts are a sequential block
14540          * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
14541          * Map entries are accessed as offset + extracted value.  Adjust
14542          * the added offset so this sequence can be placed anywhere in
14543          * the table - as long as the entries themselves do not wrap.
14544          * There are only enough bits in offset for the table size, so
14545          * start with that to allow for a "negative" offset.
14546          */
14547         offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14548                                                 (int)dd->first_dyn_alloc_ctxt);
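        /*
         * Worked example (numbers assumed): with NUM_MAP_ENTRIES = 256,
         * rmt->used = 20 and first_dyn_alloc_ctxt = 8, offset becomes
         * (u8)(256 + 20 - 8) = 12, so extracted context 8 lands on map
         * entry (8 + 12) % 256 = 20, the first unused entry.
         */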
14549
14550         for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
14551                                 i < dd->num_rcv_contexts; i++, idx++) {
14552                 /* replace with identity mapping */
14553                 regoff = (idx % 8) * 8;
14554                 regidx = idx / 8;
14555                 reg = rmt->map[regidx];
14556                 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14557                 reg |= (u64)i << regoff;
14558                 rmt->map[regidx] = reg;
14559         }
14560
14561         /*
14562          * For RSM intercept of Expected FECN packets:
14563          * o packet type 0 - expected
14564          * o match on F (bit 95), using select/match 1, and
14565          * o match on SH (bit 133), using select/match 2.
14566          *
14567          * Use index 1 to extract the 8-bit receive context from DestQP
14568          * (start at bit 64).  Use that as the RSM map table index.
14569          */
14570         rrd.offset = offset;
14571         rrd.pkt_type = 0;
14572         rrd.field1_off = 95;
14573         rrd.field2_off = 133;
14574         rrd.index1_off = 64;
14575         rrd.index1_width = 8;
14576         rrd.index2_off = 0;
14577         rrd.index2_width = 0;
14578         rrd.mask1 = 1;
14579         rrd.value1 = 1;
14580         rrd.mask2 = 1;
14581         rrd.value2 = 1;
14582
14583         /* add rule 1 */
14584         add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14585
14586         rmt->used += total_cnt;
14587 }
14588
14589 /* Initialize RSM for VNIC */
14590 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14591 {
14592         u8 i, j;
14593         u8 ctx_id = 0;
14594         u64 reg;
14595         u32 regoff;
14596         struct rsm_rule_data rrd;
14597
14598         if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14599                 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14600                            dd->vnic.rmt_start);
14601                 return;
14602         }
14603
14604         dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14605                 dd->vnic.rmt_start,
14606                 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14607
14608         /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
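        /*
         * For example (illustrative): with rmt_start = 20 the first vnic
         * entry lives in map register 20 / 8 = 2 (CSR offset
         * RCV_RSM_MAP_TABLE + 16) at byte lane 20 % 8 = 4.
         */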
14609         regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14610         reg = read_csr(dd, regoff);
14611         for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14612                 /* Update map register with vnic context */
14613                 j = (dd->vnic.rmt_start + i) % 8;
14614                 reg &= ~(0xffllu << (j * 8));
14615                 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14616                 /* Wrap up vnic ctx index */
14617                 ctx_id %= dd->vnic.num_ctxt;
14618                 /* Write back map register */
14619                 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14620                         dev_dbg(&(dd)->pcidev->dev,
14621                                 "Vnic rsm map reg[%d] =0x%llx\n",
14622                                 regoff - RCV_RSM_MAP_TABLE, reg);
14623
14624                         write_csr(dd, regoff, reg);
14625                         regoff += 8;
14626                         if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14627                                 reg = read_csr(dd, regoff);
14628                 }
14629         }
14630
14631         /* Add rule for vnic */
14632         rrd.offset = dd->vnic.rmt_start;
14633         rrd.pkt_type = 4;
14634         /* Match 16B packets */
14635         rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14636         rrd.mask1 = L2_TYPE_MASK;
14637         rrd.value1 = L2_16B_VALUE;
14638         /* Match ETH L4 packets */
14639         rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14640         rrd.mask2 = L4_16B_TYPE_MASK;
14641         rrd.value2 = L4_16B_ETH_VALUE;
14642         /* Calc context from veswid and entropy */
14643         rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14644         rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14645         rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14646         rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14647         add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14648
14649         /* Enable RSM if not already enabled */
14650         add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14651 }
14652
14653 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14654 {
14655         clear_rsm_rule(dd, RSM_INS_VNIC);
14656
14657         /* Disable RSM if used only by vnic */
14658         if (dd->vnic.rmt_start == 0)
14659                 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14660 }
14661
14662 static int init_rxe(struct hfi1_devdata *dd)
14663 {
14664         struct rsm_map_table *rmt;
14665         u64 val;
14666
14667         /* enable all receive errors */
14668         write_csr(dd, RCV_ERR_MASK, ~0ull);
14669
14670         rmt = alloc_rsm_map_table(dd);
14671         if (!rmt)
14672                 return -ENOMEM;
14673
14674         /* set up QOS, including the QPN map table */
14675         init_qos(dd, rmt);
14676         init_user_fecn_handling(dd, rmt);
14677         complete_rsm_map_table(dd, rmt);
14678         /* record number of used rsm map entries for vnic */
14679         dd->vnic.rmt_start = rmt->used;
14680         kfree(rmt);
14681
14682         /*
14683          * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14684          * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14685          * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14686          * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14687          * Max_PayLoad_Size set to its minimum of 128.
14688          *
14689          * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14690          * (64 bytes).  Max_Payload_Size is possibly modified upward in
14691          * tune_pcie_caps() which is called after this routine.
14692          */
14693
14694         /* Have 16 bytes (4DW) of bypass header available in header queue */
14695         val = read_csr(dd, RCV_BYPASS);
14696         val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14697         val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14698                 RCV_BYPASS_HDR_SIZE_SHIFT);
14699         write_csr(dd, RCV_BYPASS, val);
14700         return 0;
14701 }
14702
14703 static void init_other(struct hfi1_devdata *dd)
14704 {
14705         /* enable all CCE errors */
14706         write_csr(dd, CCE_ERR_MASK, ~0ull);
14707         /* enable *some* Misc errors */
14708         write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14709         /* enable all DC errors, except LCB */
14710         write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14711         write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14712 }
14713
14714 /*
14715  * Fill out the given AU table using the given CU.  A CU is defined in terms
14716  * of AUs.  The table is an encoding: given the index, how many AUs does that
14717  * represent?
14718  *
14719  * NOTE: Assumes that the register layout is the same for the
14720  * local and remote tables.
14721  */
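/*
 * Example (assuming cu = 1): the eight entries encode 0, 1, 2, 4, 8,
 * 16, 32 and 64 AUs for table indices 0 through 7.
 */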
14722 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14723                                u32 csr0to3, u32 csr4to7)
14724 {
14725         write_csr(dd, csr0to3,
14726                   0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14727                   1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14728                   2ull * cu <<
14729                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14730                   4ull * cu <<
14731                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14732         write_csr(dd, csr4to7,
14733                   8ull * cu <<
14734                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14735                   16ull * cu <<
14736                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14737                   32ull * cu <<
14738                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14739                   64ull * cu <<
14740                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14741 }
14742
14743 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14744 {
14745         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14746                            SEND_CM_LOCAL_AU_TABLE4_TO7);
14747 }
14748
14749 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14750 {
14751         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14752                            SEND_CM_REMOTE_AU_TABLE4_TO7);
14753 }
14754
14755 static void init_txe(struct hfi1_devdata *dd)
14756 {
14757         int i;
14758
14759         /* enable all PIO, SDMA, general, and Egress errors */
14760         write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14761         write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14762         write_csr(dd, SEND_ERR_MASK, ~0ull);
14763         write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14764
14765         /* enable all per-context and per-SDMA engine errors */
14766         for (i = 0; i < chip_send_contexts(dd); i++)
14767                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14768         for (i = 0; i < chip_sdma_engines(dd); i++)
14769                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14770
14771         /* set the local CU to AU mapping */
14772         assign_local_cm_au_table(dd, dd->vcu);
14773
14774         /*
14775          * Set reasonable default for Credit Return Timer
14776          * Don't set on Simulator - causes it to choke.
14777          */
14778         if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14779                 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14780 }
14781
14782 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14783                        u16 jkey)
14784 {
14785         u8 hw_ctxt;
14786         u64 reg;
14787
14788         if (!rcd || !rcd->sc)
14789                 return -EINVAL;
14790
14791         hw_ctxt = rcd->sc->hw_context;
14792         reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14793                 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14794                  SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14795         /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14796         if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14797                 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14798         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14799         /*
14800          * Enable send-side J_KEY integrity check, unless this is A0 h/w
14801          */
14802         if (!is_ax(dd)) {
14803                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14804                 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14805                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14806         }
14807
14808         /* Enable J_KEY check on receive context. */
14809         reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14810                 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14811                  RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14812         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14813
14814         return 0;
14815 }
14816
14817 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14818 {
14819         u8 hw_ctxt;
14820         u64 reg;
14821
14822         if (!rcd || !rcd->sc)
14823                 return -EINVAL;
14824
14825         hw_ctxt = rcd->sc->hw_context;
14826         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14827         /*
14828          * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14829          * This check would not have been enabled for A0 h/w, see
14830          * set_ctxt_jkey().
14831          */
14832         if (!is_ax(dd)) {
14833                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14834                 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14835                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14836         }
14837         /* Turn off the J_KEY on the receive side */
14838         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14839
14840         return 0;
14841 }
14842
14843 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14844                        u16 pkey)
14845 {
14846         u8 hw_ctxt;
14847         u64 reg;
14848
14849         if (!rcd || !rcd->sc)
14850                 return -EINVAL;
14851
14852         hw_ctxt = rcd->sc->hw_context;
14853         reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14854                 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14855         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14856         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14857         reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14858         reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14859         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14860
14861         return 0;
14862 }
14863
14864 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14865 {
14866         u8 hw_ctxt;
14867         u64 reg;
14868
14869         if (!ctxt || !ctxt->sc)
14870                 return -EINVAL;
14871
14872         hw_ctxt = ctxt->sc->hw_context;
14873         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14874         reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14875         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14876         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14877
14878         return 0;
14879 }
14880
14881 /*
14882  * Start cleaning up the chip. Our clean up happens in multiple
14883  * stages and this is just the first.
14884  */
14885 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14886 {
14887         aspm_exit(dd);
14888         free_cntrs(dd);
14889         free_rcverr(dd);
14890         finish_chip_resources(dd);
14891 }
14892
14893 #define HFI_BASE_GUID(dev) \
14894         ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14895
14896 /*
14897  * Information can be shared between the two HFIs on the same ASIC
14898  * in the same OS.  This function finds the peer device and sets
14899  * up a shared structure.
14900  */
14901 static int init_asic_data(struct hfi1_devdata *dd)
14902 {
14903         unsigned long flags;
14904         struct hfi1_devdata *tmp, *peer = NULL;
14905         struct hfi1_asic_data *asic_data;
14906         int ret = 0;
14907
14908         /* pre-allocate the asic structure in case we are the first device */
14909         asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14910         if (!asic_data)
14911                 return -ENOMEM;
14912
14913         spin_lock_irqsave(&hfi1_devs_lock, flags);
14914         /* Find our peer device */
14915         list_for_each_entry(tmp, &hfi1_dev_list, list) {
14916                 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14917                     dd->unit != tmp->unit) {
14918                         peer = tmp;
14919                         break;
14920                 }
14921         }
14922
14923         if (peer) {
14924                 /* use already allocated structure */
14925                 dd->asic_data = peer->asic_data;
14926                 kfree(asic_data);
14927         } else {
14928                 dd->asic_data = asic_data;
14929                 mutex_init(&dd->asic_data->asic_resource_mutex);
14930         }
14931         dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14932         spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14933
14934         /* first one through - set up i2c devices */
14935         if (!peer)
14936                 ret = set_up_i2c(dd, dd->asic_data);
14937
14938         return ret;
14939 }
14940
14941 /*
14942  * Set dd->boardname.  Use a generic name if a name is not returned from
14943  * EFI variable space.
14944  *
14945  * Return 0 on success, -ENOMEM if space could not be allocated.
14946  */
14947 static int obtain_boardname(struct hfi1_devdata *dd)
14948 {
14949         /* generic board description */
14950         const char generic[] =
14951                 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14952         unsigned long size;
14953         int ret;
14954
14955         ret = read_hfi1_efi_var(dd, "description", &size,
14956                                 (void **)&dd->boardname);
14957         if (ret) {
14958                 dd_dev_info(dd, "Board description not found\n");
14959                 /* use generic description */
14960                 dd->boardname = kstrdup(generic, GFP_KERNEL);
14961                 if (!dd->boardname)
14962                         return -ENOMEM;
14963         }
14964         return 0;
14965 }
14966
14967 /*
14968  * Check the interrupt registers to make sure that they are mapped correctly.
14969  * It is intended to help user identify any mismapping by VMM when the driver
14970  * is running in a VM. This function should only be called before interrupt
14971  * is set up properly.
14972  *
14973  * Return 0 on success, -EINVAL on failure.
14974  */
14975 static int check_int_registers(struct hfi1_devdata *dd)
14976 {
14977         u64 reg;
14978         u64 all_bits = ~(u64)0;
14979         u64 mask;
14980
14981         /* Clear CceIntMask[0] to avoid raising any interrupts */
14982         mask = read_csr(dd, CCE_INT_MASK);
14983         write_csr(dd, CCE_INT_MASK, 0ull);
14984         reg = read_csr(dd, CCE_INT_MASK);
14985         if (reg)
14986                 goto err_exit;
14987
14988         /* Clear all interrupt status bits */
14989         write_csr(dd, CCE_INT_CLEAR, all_bits);
14990         reg = read_csr(dd, CCE_INT_STATUS);
14991         if (reg)
14992                 goto err_exit;
14993
14994         /* Set all interrupt status bits */
14995         write_csr(dd, CCE_INT_FORCE, all_bits);
14996         reg = read_csr(dd, CCE_INT_STATUS);
14997         if (reg != all_bits)
14998                 goto err_exit;
14999
15000         /* Restore the interrupt mask */
15001         write_csr(dd, CCE_INT_CLEAR, all_bits);
15002         write_csr(dd, CCE_INT_MASK, mask);
15003
15004         return 0;
15005 err_exit:
15006         write_csr(dd, CCE_INT_MASK, mask);
15007         dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
15008         return -EINVAL;
15009 }
15010
15011 /**
15012  * Allocate and initialize the device structure for the hfi.
15013  * @pdev: the pci_dev for hfi1_ib device
15014  * @ent: pci_device_id struct for this dev
15015  *
15016  * Also allocates, initializes, and returns the devdata struct for this
15017  * device instance
15018  *
15019  * This is global, and is called directly at init to set up the
15020  * chip-specific function pointers for later use.
15021  */
15022 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
15023                                   const struct pci_device_id *ent)
15024 {
15025         struct hfi1_devdata *dd;
15026         struct hfi1_pportdata *ppd;
15027         u64 reg;
15028         int i, ret;
15029         static const char * const inames[] = { /* implementation names */
15030                 "RTL silicon",
15031                 "RTL VCS simulation",
15032                 "RTL FPGA emulation",
15033                 "Functional simulator"
15034         };
15035         struct pci_dev *parent = pdev->bus->self;
15036         u32 sdma_engines;
15037
15038         dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
15039                                 sizeof(struct hfi1_pportdata));
15040         if (IS_ERR(dd))
15041                 goto bail;
15042         sdma_engines = chip_sdma_engines(dd);
15043         ppd = dd->pport;
15044         for (i = 0; i < dd->num_pports; i++, ppd++) {
15045                 int vl;
15046                 /* init common fields */
15047                 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
15048                 /* DC supports 4 link widths */
15049                 ppd->link_width_supported =
15050                         OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
15051                         OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
15052                 ppd->link_width_downgrade_supported =
15053                         ppd->link_width_supported;
15054                 /* start out enabling only 4X */
15055                 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
15056                 ppd->link_width_downgrade_enabled =
15057                                         ppd->link_width_downgrade_supported;
15058                 /* link width active is 0 when link is down */
15059                 /* link width downgrade active is 0 when link is down */
15060
15061                 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
15062                     num_vls > HFI1_MAX_VLS_SUPPORTED) {
15063                         dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
15064                                    num_vls, HFI1_MAX_VLS_SUPPORTED);
15065                         num_vls = HFI1_MAX_VLS_SUPPORTED;
15066                 }
15067                 ppd->vls_supported = num_vls;
15068                 ppd->vls_operational = ppd->vls_supported;
15069                 /* Set the default MTU. */
15070                 for (vl = 0; vl < num_vls; vl++)
15071                         dd->vld[vl].mtu = hfi1_max_mtu;
15072                 dd->vld[15].mtu = MAX_MAD_PACKET;
15073                 /*
15074                  * Set the initial values to reasonable default, will be set
15075                  * for real when link is up.
15076                  */
15077                 ppd->overrun_threshold = 0x4;
15078                 ppd->phy_error_threshold = 0xf;
15079                 ppd->port_crc_mode_enabled = link_crc_mask;
15080                 /* initialize supported LTP CRC mode */
15081                 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
15082                 /* initialize enabled LTP CRC mode */
15083                 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
15084                 /* start in offline */
15085                 ppd->host_link_state = HLS_DN_OFFLINE;
15086                 init_vl_arb_caches(ppd);
15087         }
15088
15089         /*
15090          * Do remaining PCIe setup and save PCIe values in dd.
15091          * Any error printing is already done by the init code.
15092          * On return, we have the chip mapped.
15093          */
15094         ret = hfi1_pcie_ddinit(dd, pdev);
15095         if (ret < 0)
15096                 goto bail_free;
15097
15098         /* Save PCI space registers to rewrite after device reset */
15099         ret = save_pci_variables(dd);
15100         if (ret < 0)
15101                 goto bail_cleanup;
15102
15103         dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
15104                         & CCE_REVISION_CHIP_REV_MAJOR_MASK;
15105         dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
15106                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
15107
15108         /*
15109          * Check interrupt registers mapping if the driver has no access to
15110          * the upstream component. In this case, it is likely that the driver
15111          * is running in a VM.
15112          */
15113         if (!parent) {
15114                 ret = check_int_registers(dd);
15115                 if (ret)
15116                         goto bail_cleanup;
15117         }
15118
15119         /*
15120          * obtain the hardware ID - NOT related to unit, which is a
15121          * software enumeration
15122          */
15123         reg = read_csr(dd, CCE_REVISION2);
15124         dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
15125                                         & CCE_REVISION2_HFI_ID_MASK;
15126         /* the variable size will remove unwanted bits */
15127         dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
15128         dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
15129         dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
15130                     dd->icode < ARRAY_SIZE(inames) ?
15131                     inames[dd->icode] : "unknown", (int)dd->irev);
15132
15133         /* speeds the hardware can support */
15134         dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
15135         /* speeds allowed to run at */
15136         dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
15137         /* give a reasonable active value, will be set on link up */
15138         dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
15139
15140         /* fix up link widths for emulation _p */
15141         ppd = dd->pport;
15142         if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
15143                 ppd->link_width_supported =
15144                         ppd->link_width_enabled =
15145                         ppd->link_width_downgrade_supported =
15146                         ppd->link_width_downgrade_enabled =
15147                                 OPA_LINK_WIDTH_1X;
15148         }
15149         /* ensure num_vls isn't larger than the number of sdma engines */
15150         if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
15151                 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
15152                            num_vls, sdma_engines);
15153                 num_vls = sdma_engines;
15154                 ppd->vls_supported = sdma_engines;
15155                 ppd->vls_operational = ppd->vls_supported;
15156         }
15157
15158         /*
15159          * Convert the ns parameter to the 64 * cclocks used in the CSR.
15160          * Limit the max if larger than the field holds.  If timeout is
15161          * non-zero, then the calculated field will be at least 1.
15162          *
15163          * Must be after icode is set up - the cclock rate depends
15164          * on knowing the hardware being used.
15165          */
15166         dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
15167         if (dd->rcv_intr_timeout_csr >
15168                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
15169                 dd->rcv_intr_timeout_csr =
15170                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
15171         else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
15172                 dd->rcv_intr_timeout_csr = 1;
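        /*
         * Rough illustration (the cclock rate is an assumption here): at
         * ~800 MHz, a 1000 ns timeout is about 800 cclocks, giving a CSR
         * reload value of roughly 800 / 64 = 12.
         */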
15173
15174         /* needs to be done before we look for the peer device */
15175         read_guid(dd);
15176
15177         /* set up shared ASIC data with peer device */
15178         ret = init_asic_data(dd);
15179         if (ret)
15180                 goto bail_cleanup;
15181
15182         /* obtain chip sizes, reset chip CSRs */
15183         ret = init_chip(dd);
15184         if (ret)
15185                 goto bail_cleanup;
15186
15187         /* read in the PCIe link speed information */
15188         ret = pcie_speeds(dd);
15189         if (ret)
15190                 goto bail_cleanup;
15191
15192         /* call before get_platform_config(), after init_chip_resources() */
15193         ret = eprom_init(dd);
15194         if (ret)
15195                 goto bail_free_rcverr;
15196
15197         /* Needs to be called before hfi1_firmware_init */
15198         get_platform_config(dd);
15199
15200         /* read in firmware */
15201         ret = hfi1_firmware_init(dd);
15202         if (ret)
15203                 goto bail_cleanup;
15204
15205         /*
15206          * In general, the PCIe Gen3 transition must occur after the
15207          * chip has been idled (so it won't initiate any PCIe transactions
15208          * e.g. an interrupt) and before the driver changes any registers
15209          * (the transition will reset the registers).
15210          *
15211          * In particular, place this call after:
15212          * - init_chip()     - the chip will not initiate any PCIe transactions
15213          * - pcie_speeds()   - reads the current link speed
15214          * - hfi1_firmware_init() - the needed firmware is ready to be
15215          *                          downloaded
15216          */
15217         ret = do_pcie_gen3_transition(dd);
15218         if (ret)
15219                 goto bail_cleanup;
15220
15221         /* start setting dd values and adjusting CSRs */
15222         init_early_variables(dd);
15223
15224         parse_platform_config(dd);
15225
15226         ret = obtain_boardname(dd);
15227         if (ret)
15228                 goto bail_cleanup;
15229
15230         snprintf(dd->boardversion, BOARD_VERS_MAX,
15231                  "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15232                  HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15233                  (u32)dd->majrev,
15234                  (u32)dd->minrev,
15235                  (dd->revision >> CCE_REVISION_SW_SHIFT)
15236                     & CCE_REVISION_SW_MASK);
15237
15238         ret = set_up_context_variables(dd);
15239         if (ret)
15240                 goto bail_cleanup;
15241
15242         /* set initial RXE CSRs */
15243         ret = init_rxe(dd);
15244         if (ret)
15245                 goto bail_cleanup;
15246
15247         /* set initial TXE CSRs */
15248         init_txe(dd);
15249         /* set initial non-RXE, non-TXE CSRs */
15250         init_other(dd);
15251         /* set up KDETH QP prefix in both RX and TX CSRs */
15252         init_kdeth_qp(dd);
15253
15254         ret = hfi1_dev_affinity_init(dd);
15255         if (ret)
15256                 goto bail_cleanup;
15257
15258         /* send contexts must be set up before receive contexts */
15259         ret = init_send_contexts(dd);
15260         if (ret)
15261                 goto bail_cleanup;
15262
15263         ret = hfi1_create_kctxts(dd);
15264         if (ret)
15265                 goto bail_cleanup;
15266
15267         /*
15268          * Initialize aspm, to be done after gen3 transition and setting up
15269          * contexts and before enabling interrupts
15270          */
15271         aspm_init(dd);
15272
15273         ret = init_pervl_scs(dd);
15274         if (ret)
15275                 goto bail_cleanup;
15276
15277         /* sdma init */
15278         for (i = 0; i < dd->num_pports; ++i) {
15279                 ret = sdma_init(dd, i);
15280                 if (ret)
15281                         goto bail_cleanup;
15282         }
15283
15284         /* use contexts created by hfi1_create_kctxts */
15285         ret = set_up_interrupts(dd);
15286         if (ret)
15287                 goto bail_cleanup;
15288
15289         ret = hfi1_comp_vectors_set_up(dd);
15290         if (ret)
15291                 goto bail_clear_intr;
15292
15293         /* set up LCB access - must be after set_up_interrupts() */
15294         init_lcb_access(dd);
15295
15296         /*
15297          * Serial number is created from the base guid:
15298          * [27:24] = base guid [38:35]
15299          * [23: 0] = base guid [23: 0]
15300          */
15301         snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15302                  (dd->base_guid & 0xFFFFFF) |
15303                      ((dd->base_guid >> 11) & 0xF000000));
15304
15305         dd->oui1 = dd->base_guid >> 56 & 0xFF;
15306         dd->oui2 = dd->base_guid >> 48 & 0xFF;
15307         dd->oui3 = dd->base_guid >> 40 & 0xFF;
15308
15309         ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15310         if (ret)
15311                 goto bail_clear_intr;
15312
15313         thermal_init(dd);
15314
15315         ret = init_cntrs(dd);
15316         if (ret)
15317                 goto bail_clear_intr;
15318
15319         ret = init_rcverr(dd);
15320         if (ret)
15321                 goto bail_free_cntrs;
15322
15323         init_completion(&dd->user_comp);
15324
15325         /* The user refcount starts with one to indicate an active device */
15326         atomic_set(&dd->user_refcount, 1);
15327
15328         goto bail;
15329
15330 bail_free_rcverr:
15331         free_rcverr(dd);
15332 bail_free_cntrs:
15333         free_cntrs(dd);
15334 bail_clear_intr:
15335         hfi1_comp_vectors_clean_up(dd);
15336         hfi1_clean_up_interrupts(dd);
15337 bail_cleanup:
15338         hfi1_pcie_ddcleanup(dd);
15339 bail_free:
15340         hfi1_free_devdata(dd);
15341         dd = ERR_PTR(ret);
15342 bail:
15343         return dd;
15344 }
15345
15346 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15347                         u32 dw_len)
15348 {
15349         u32 delta_cycles;
15350         u32 current_egress_rate = ppd->current_egress_rate;
15351         /* rates here are in units of 10^6 bits/sec */
15352
15353         if (desired_egress_rate == -1)
15354                 return 0; /* shouldn't happen */
15355
15356         if (desired_egress_rate >= current_egress_rate)
15357                 return 0; /* we can't help go faster, only slower */
15358
15359         delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15360                         egress_cycles(dw_len * 4, current_egress_rate);
15361
15362         return (u16)delta_cycles;
15363 }
15364
15365 /**
15366  * create_pbc - build a pbc for transmission
15367  * @flags: special case flags or-ed in built pbc
15368  * @srate: static rate
15369  * @vl: vl
15370  * @dwlen: dword length (header words + data words + pbc words)
15371  *
15372  * Create a PBC with the given flags, rate, VL, and length.
15373  *
15374  * NOTE: The PBC created will not insert any HCRC - all callers but one are
15375  * for verbs, which does not use this PSM feature.  The lone other caller
15376  * is for the diagnostic interface which calls this if the user does not
15377  * supply their own PBC.
15378  */
15379 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15380                u32 dw_len)
15381 {
15382         u64 pbc, delay = 0;
15383
15384         if (unlikely(srate_mbs))
15385                 delay = delay_cycles(ppd, srate_mbs, dw_len);
15386
15387         pbc = flags
15388                 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15389                 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15390                 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15391                 | (dw_len & PBC_LENGTH_DWS_MASK)
15392                         << PBC_LENGTH_DWS_SHIFT;
15393
15394         return pbc;
15395 }
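/*
 * Typical usage (sketch only): building a PBC for a VL0 packet of
 * pkt_dws total dwords with no static rate throttling:
 *
 *	pbc = create_pbc(ppd, 0, 0, 0, pkt_dws);
 *
 * A zero srate_mbs skips the static rate delay calculation entirely.
 */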
15396
15397 #define SBUS_THERMAL    0x4f
15398 #define SBUS_THERM_MONITOR_MODE 0x1
15399
15400 #define THERM_FAILURE(dev, ret, reason) \
15401         dd_dev_err((dd),                                                \
15402                    "Thermal sensor initialization failed: %s (%d)\n",   \
15403                    (reason), (ret))
15404
15405 /*
15406  * Initialize the thermal sensor.
15407  *
15408  * After initialization, enable polling of thermal sensor through
15409  * SBus interface. In order for this to work, the SBus Master
15410  * firmware has to be loaded due to the fact that the HW polling
15411  * logic uses SBus interrupts, which are not supported with
15412  * default firmware. Otherwise, no data will be returned through
15413  * the ASIC_STS_THERM CSR.
15414  */
15415 static int thermal_init(struct hfi1_devdata *dd)
15416 {
15417         int ret = 0;
15418
15419         if (dd->icode != ICODE_RTL_SILICON ||
15420             check_chip_resource(dd, CR_THERM_INIT, NULL))
15421                 return ret;
15422
15423         ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15424         if (ret) {
15425                 THERM_FAILURE(dd, ret, "Acquire SBus");
15426                 return ret;
15427         }
15428
15429         dd_dev_info(dd, "Initializing thermal sensor\n");
15430         /* Disable polling of thermal readings */
15431         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15432         msleep(100);
15433         /* Thermal Sensor Initialization */
15434         /*    Step 1: Reset the Thermal SBus Receiver */
15435         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15436                                 RESET_SBUS_RECEIVER, 0);
15437         if (ret) {
15438                 THERM_FAILURE(dd, ret, "Bus Reset");
15439                 goto done;
15440         }
15441         /*    Step 2: Set Reset bit in Thermal block */
15442         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15443                                 WRITE_SBUS_RECEIVER, 0x1);
15444         if (ret) {
15445                 THERM_FAILURE(dd, ret, "Therm Block Reset");
15446                 goto done;
15447         }
15448         /*    Step 3: Write clock divider value 0x32 (divide by 50: 100MHz -> 2MHz) */
15449         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15450                                 WRITE_SBUS_RECEIVER, 0x32);
15451         if (ret) {
15452                 THERM_FAILURE(dd, ret, "Write Clock Div");
15453                 goto done;
15454         }
15455         /*    Step 4: Select temperature mode */
15456         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15457                                 WRITE_SBUS_RECEIVER,
15458                                 SBUS_THERM_MONITOR_MODE);
15459         if (ret) {
15460                 THERM_FAILURE(dd, ret, "Write Mode Sel");
15461                 goto done;
15462         }
15463         /*    Step 5: De-assert block reset and start conversion */
15464         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15465                                 WRITE_SBUS_RECEIVER, 0x2);
15466         if (ret) {
15467                 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15468                 goto done;
15469         }
15470         /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
15471         msleep(22);
15472
15473         /* Enable polling of thermal readings */
15474         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15475
15476         /* Set initialized flag */
15477         ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15478         if (ret)
15479                 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15480
15481 done:
15482         release_chip_resource(dd, CR_SBUS);
15483         return ret;
15484 }
15485
15486 static void handle_temp_err(struct hfi1_devdata *dd)
15487 {
15488         struct hfi1_pportdata *ppd = &dd->pport[0];
15489         /*
15490          * Thermal Critical Interrupt
15491          * Put the device into forced freeze mode, take link down to
15492          * offline, and put DC into reset.
15493          */
15494         dd_dev_emerg(dd,
15495                      "Critical temperature reached! Forcing device into freeze mode!\n");
15496         dd->flags |= HFI1_FORCED_FREEZE;
15497         start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15498         /*
15499          * Shut DC down as much and as quickly as possible.
15500          *
15501          * Step 1: Take the link down to OFFLINE. This will cause the
15502          *         8051 to put the Serdes in reset. However, we don't want to
15503          *         go through the entire link state machine since we want to
15504          *         shutdown ASAP. Furthermore, this is not a graceful shutdown
15505          *         but rather an attempt to save the chip.
15506          *         Code below is almost the same as quiet_serdes() but avoids
15507          *         all the extra work and the sleeps.
15508          */
15509         ppd->driver_link_ready = 0;
15510         ppd->link_enabled = 0;
15511         set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15512                                 PLS_OFFLINE);
15513         /*
15514          * Step 2: Shutdown LCB and 8051
15515          *         After shutdown, do not restore DC_CFG_RESET value.
15516          */
15517         dc_shutdown(dd);
15518 }